Diffstat (limited to 'drivers')
-rw-r--r--drivers/base/property.c63
-rw-r--r--drivers/bcma/main.c2
-rw-r--r--drivers/bluetooth/btintel.c239
-rw-r--r--drivers/bluetooth/btintel.h11
-rw-r--r--drivers/bluetooth/btmrvl_main.c6
-rw-r--r--drivers/bluetooth/btmtkuart.c13
-rw-r--r--drivers/bluetooth/btrsi.c1
-rw-r--r--drivers/bluetooth/btrtl.c26
-rw-r--r--drivers/bluetooth/btusb.c64
-rw-r--r--drivers/bluetooth/hci_h5.c35
-rw-r--r--drivers/bluetooth/hci_ldisc.c3
-rw-r--r--drivers/bluetooth/hci_qca.c5
-rw-r--r--drivers/bluetooth/hci_vhci.c122
-rw-r--r--drivers/hsi/clients/ssi_protocol.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c1
-rw-r--r--drivers/infiniband/hw/qedr/main.c2
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c3
-rw-r--r--drivers/net/arcnet/arc-rimi.c5
-rw-r--r--drivers/net/arcnet/arcdevice.h5
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/arcnet/com20020.c4
-rw-r--r--drivers/net/arcnet/com20020_cs.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c3
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/dsa/Kconfig1
-rw-r--r--drivers/net/dsa/Makefile2
-rw-r--r--drivers/net/dsa/b53/b53_common.c59
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/bcm_sf2.c4
-rw-r--r--drivers/net/dsa/ocelot/felix.c4
-rw-r--r--drivers/net/dsa/qca8k.c435
-rw-r--r--drivers/net/dsa/qca8k.h35
-rw-r--r--drivers/net/dsa/realtek-smi-core.c4
-rw-r--r--drivers/net/dsa/realtek-smi-core.h4
-rw-r--r--drivers/net/dsa/rtl8365mb.c1982
-rw-r--r--drivers/net/dsa/rtl8366.c96
-rw-r--r--drivers/net/dsa/rtl8366rb.c301
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h27
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c35
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c134
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c15
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c5
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c11
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c10
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/8390/apne.c3
-rw-r--r--drivers/net/ethernet/8390/ax88796.c12
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c7
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c3
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c22
-rw-r--r--drivers/net/ethernet/8390/stnic.c5
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c6
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c14
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/agere/et131x.c4
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c20
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c6
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c5
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c15
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mac.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c2
-rw-r--r--drivers/net/ethernet/arc/Kconfig4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c9
-rw-r--r--drivers/net/ethernet/asix/Kconfig35
-rw-r--r--drivers/net/ethernet/asix/Makefile6
-rw-r--r--drivers/net/ethernet/asix/ax88796c_ioctl.c239
-rw-r--r--drivers/net/ethernet/asix/ax88796c_ioctl.h26
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c1163
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.h568
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.c115
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.h69
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c12
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c37
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c50
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c74
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c195
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c60
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c5
-rw-r--r--drivers/net/ethernet/cadence/macb.h7
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c20
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c29
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/xgmac.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c13
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c9
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c6
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c35
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c45
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c11
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c5
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c8
-rw-r--r--drivers/net/ethernet/ec_bhf.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/ethoc.c28
-rw-r--r--drivers/net/ethernet/ezchip/Kconfig2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/fealnx.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c21
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c24
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c58
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c332
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c24
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c18
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c16
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve.h31
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c3
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c101
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c98
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c117
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c84
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c78
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c13
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c12
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c7
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c14
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c46
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c645
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h10
-rw-r--r--drivers/net/ethernet/intel/Kconfig14
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c52
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c194
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h206
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c121
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c129
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c225
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c216
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c254
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c649
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c235
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c80
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c847
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c1383
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h169
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c372
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c151
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c386
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c184
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2473
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h149
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c1056
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h152
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c326
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h147
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c403
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h74
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c158
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/korina.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c21
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c87
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c16
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c19
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c13
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h138
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h572
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c601
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c222
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c52
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c38
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c234
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c273
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c8
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c35
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c17
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c18
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c97
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c394
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.c)102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.h)9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c)4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h)0
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c611
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c372
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h351
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c350
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c540
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c315
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c15
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c14
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c16
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c7
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c7
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c4
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c91
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c6
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c4
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/Kconfig2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c274
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c125
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c17
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c9
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c11
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/qdisc.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c51
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c5
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c48
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c38
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c264
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h49
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c92
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c241
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c121
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h143
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h1491
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1389
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c124
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h347
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h12339
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h222
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c405
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c98
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h286
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h500
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c43
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h135
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c167
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h131
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c23
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c66
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h765
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h2474
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h223
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c63
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c201
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h311
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c53
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c21
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c5
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c24
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c7
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c44
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c59
-rw-r--r--drivers/net/ethernet/renesas/ravb.h52
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c728
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c3
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c4
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c2
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.h2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c14
-rw-r--r--drivers/net/ethernet/sis/sis190.c10
-rw-r--r--drivers/net/ethernet/sis/sis900.c19
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c15
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c22
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c26
-rw-r--r--drivers/net/ethernet/socionext/netsec.c46
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c46
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c15
-rw-r--r--drivers/net/ethernet/sun/sunhme.c23
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c26
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c17
-rw-r--r--drivers/net/ethernet/ti/cpts.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c14
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c11
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c11
-rw-r--r--drivers/net/ethernet/wiznet/w5100.h2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c11
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c14
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c7
-rw-r--r--drivers/net/fddi/defxx.c6
-rw-r--r--drivers/net/fddi/skfp/skfddi.c7
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hamradio/6pack.c6
-rw-r--r--drivers/net/hamradio/baycom_epp.c4
-rw-r--r--drivers/net/hamradio/bpqether.c7
-rw-r--r--drivers/net/hamradio/dmascc.c5
-rw-r--r--drivers/net/hamradio/hdlcdrv.c4
-rw-r--r--drivers/net/hamradio/mkiss.c6
-rw-r--r--drivers/net/hamradio/scc.c7
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c6
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c2
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/net_failover.c3
-rw-r--r--drivers/net/netdevsim/dev.c14
-rw-r--r--drivers/net/netdevsim/ethtool.c28
-rw-r--r--drivers/net/netdevsim/health.c32
-rw-r--r--drivers/net/netdevsim/netdevsim.h1
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/phy/at803x.c196
-rw-r--r--drivers/net/phy/bcm7xxx.c201
-rw-r--r--drivers/net/phy/broadcom.c106
-rw-r--r--drivers/net/phy/dp83867.c19
-rw-r--r--drivers/net/phy/marvell10g.c107
-rw-r--r--drivers/net/phy/mdio_bus.c28
-rw-r--r--drivers/net/phy/micrel.c107
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c89
-rw-r--r--drivers/net/phy/realtek.c8
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/usb/aqc111.c4
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/asix_devices.c2
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c12
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/mcs7830.c5
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/r8152.c4
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/smsc75xx.c3
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/sr9700.c5
-rw-r--r--drivers/net/usb/sr9800.c2
-rw-r--r--drivers/net/virtio_net.c40
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c58
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h49
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c25
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c4344
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h226
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c243
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c45
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1443
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c350
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c42
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c152
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h107
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c105
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c56
-rw-r--r--drivers/net/wireless/ath/spectral_common.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c11
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c99
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c370
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c11
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c31
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c91
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c134
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h49
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c119
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c753
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c47
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c10
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c74
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c16
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c24
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h11
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h15
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/wwan/Kconfig1
-rw-r--r--drivers/net/wwan/iosm/Makefile5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.c125
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.h59
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c321
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.h205
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_flash.c594
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_flash.h229
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c107
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h18
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c317
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h49
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/nfc/fdp/i2c.c1
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/microread/mei.c6
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c4
-rw-r--r--drivers/nfc/pn533/i2c.c6
-rw-r--r--drivers/nfc/pn533/pn533.c6
-rw-r--r--drivers/nfc/pn533/pn533.h4
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/pn544/mei.c8
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c29
-rw-r--r--drivers/nfc/s3fwrn5/nci.c18
-rw-r--r--drivers/nfc/st-nci/i2c.c4
-rw-r--r--drivers/nfc/st-nci/ndlc.c4
-rw-r--r--drivers/nfc/st-nci/se.c6
-rw-r--r--drivers/nfc/st-nci/spi.c4
-rw-r--r--drivers/nfc/st21nfca/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/se.c4
-rw-r--r--drivers/nfc/trf7970a.c8
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/of_net.c145
-rw-r--r--drivers/pcmcia/pcmcia_cis.c5
-rw-r--r--drivers/ptp/idt8a340_reg.h720
-rw-r--r--drivers/ptp/ptp_clockmatrix.c1452
-rw-r--r--drivers/ptp/ptp_clockmatrix.h109
-rw-r--r--drivers/ptp/ptp_ocp.c1316
-rw-r--r--drivers/s390/net/ctcm_fsms.c60
-rw-r--r--drivers/s390/net/ctcm_main.c38
-rw-r--r--drivers/s390/net/ctcm_mpc.c8
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/net/lcs.c121
-rw-r--r--drivers/s390/net/netiucv.c104
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c8
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h4
-rw-r--r--drivers/scsi/qedf/qedf_els.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c12
-rw-r--r--drivers/scsi/qedf/qedf_main.c10
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c4
-rw-r--r--drivers/scsi/qedi/qedi_fw.c40
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c22
-rw-r--r--drivers/scsi/qedi/qedi_fw_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/dpio/dpio-cmd.h3
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c117
-rw-r--r--drivers/soc/fsl/dpio/dpio.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio.h2
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c58
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h13
-rw-r--r--drivers/staging/qlge/qlge_main.c12
-rw-r--r--drivers/usb/gadget/function/f_phonet.c5
919 files changed, 51693 insertions, 21842 deletions
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 453918eb7390..f1f35b48ab8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -15,7 +15,6 @@
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/property.h>
-#include <linux/etherdevice.h>
#include <linux/phy.h>
struct fwnode_handle *dev_fwnode(struct device *dev)
@@ -935,68 +934,6 @@ int device_get_phy_mode(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
-static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
- const char *name, char *addr,
- int alen)
-{
- int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
-
- if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
- return addr;
- return NULL;
-}
-
-/**
- * fwnode_get_mac_address - Get the MAC from the firmware node
- * @fwnode: Pointer to the firmware node
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- *
- * Search the firmware node for the best MAC address to use. 'mac-address' is
- * checked first, because that is supposed to contain the "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the firmware tables, but were not updated by the firmware. For
- * example, the DTS could define 'mac-address' and 'local-mac-address', with
- * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
- * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
- * exists but is all zeros.
-*/
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
-{
- char *res;
-
- res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
- if (res)
- return res;
-
- res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
- if (res)
- return res;
-
- return fwnode_get_mac_addr(fwnode, "address", addr, alen);
-}
-EXPORT_SYMBOL(fwnode_get_mac_address);
-
-/**
- * device_get_mac_address - Get the MAC for a given device
- * @dev: Pointer to the device
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- */
-void *device_get_mac_address(struct device *dev, char *addr, int alen)
-{
- return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
-}
-EXPORT_SYMBOL(device_get_mac_address);
-
/**
* fwnode_irq_get - Get IRQ directly from a fwnode
* @fwnode: Pointer to the firmware node
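The helpers deleted above are relocated rather than dropped: in this series the MAC address lookup moves out of the driver core and into the networking code. As a reference for the fallback order spelled out in the removed comment (mac-address, then local-mac-address, then the obsolete address), here is a minimal sketch in the style of the removed fwnode_get_mac_addr(); it illustrates the documented behaviour and is not the relocated kernel helper itself.

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/property.h>

/* Illustrative only: replicate the documented lookup order for a fwnode.
 * Returns 0 and fills @addr on success, -ENOENT if no valid MAC is found.
 */
static int example_fwnode_mac(struct fwnode_handle *fwnode, u8 *addr)
{
	static const char * const props[] = {
		"mac-address",		/* preferred, "most recent" address */
		"local-mac-address",	/* default factory address */
		"address",		/* obsolete, old device trees only */
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(props); i++) {
		if (!fwnode_property_read_u8_array(fwnode, props[i],
						   addr, ETH_ALEN) &&
		    is_valid_ether_addr(addr))
			return 0;
	}
	return -ENOENT;
}

Note that all-zero addresses fail is_valid_ether_addr(), which is how properties left unpopulated by the firmware are rejected.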
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index c6d6ba0d00b1..8e7ca3e4c8c4 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -20,7 +20,7 @@ MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
-static unsigned int bcma_bus_next_num = 0;
+static unsigned int bcma_bus_next_num;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
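The bcma change is purely stylistic: the C standard guarantees that objects with static storage duration start out zeroed, so an explicit "= 0" is redundant and checkpatch.pl flags it. A two-line illustration with hypothetical names:

/* C11 6.7.9p10: objects with static storage duration that are not
 * explicitly initialized are zero-initialized, so both counters below
 * start at 0; the first form merely draws a checkpatch warning.
 */
static unsigned int counter_explicit = 0;	/* redundant initializer */
static unsigned int counter_implicit;		/* equivalent, preferred */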
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index f1705b46fc88..9359bff47296 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -1037,8 +1037,9 @@ static bool btintel_firmware_version(struct hci_dev *hdev,
params = (void *)(fw_ptr + sizeof(*cmd));
- bt_dev_info(hdev, "Boot Address: 0x%x",
- le32_to_cpu(params->boot_addr));
+ *boot_addr = le32_to_cpu(params->boot_addr);
+
+ bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr);
bt_dev_info(hdev, "Firmware Version: %u-%u.%u",
params->fw_build_num, params->fw_build_ww,
@@ -1071,9 +1072,6 @@ int btintel_download_firmware(struct hci_dev *hdev,
/* Skip version checking */
break;
default:
- /* Skip reading firmware file version in bootloader mode */
- if (ver->fw_variant == 0x06)
- break;
/* Skip download if firmware has the same version */
if (btintel_firmware_version(hdev, ver->fw_build_num,
@@ -1114,19 +1112,16 @@ static int btintel_download_fw_tlv(struct hci_dev *hdev,
int err;
u32 css_header_ver;
- /* Skip reading firmware file version in bootloader mode */
- if (ver->img_type != 0x01) {
- /* Skip download if firmware has the same version */
- if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
- ver->min_fw_build_cw,
- ver->min_fw_build_yy,
- fw, boot_param)) {
- bt_dev_info(hdev, "Firmware already loaded");
- /* Return -EALREADY to indicate that firmware has
- * already been loaded.
- */
- return -EALREADY;
- }
+ /* Skip download if firmware has the same version */
+ if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
+ ver->min_fw_build_cw,
+ ver->min_fw_build_yy,
+ fw, boot_param)) {
+ bt_dev_info(hdev, "Firmware already loaded");
+ /* Return -EALREADY to indicate that firmware has
+ * already been loaded.
+ */
+ return -EALREADY;
}
/* The firmware variant determines if the device is in bootloader
@@ -1285,12 +1280,16 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
- u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00 };
+ u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 };
+ u8 trace_enable = 0x02;
struct sk_buff *skb;
- if (!features)
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
return -EINVAL;
+ }
if (!(features->page1[0] & 0x3f)) {
bt_dev_info(hdev, "Telemetry exception format not supported");
@@ -1303,11 +1302,95 @@ static int btintel_set_debug_features(struct hci_dev *hdev,
PTR_ERR(skb));
return PTR_ERR(skb);
}
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
+ return 0;
+}
+
+static int btintel_reset_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features)
+{
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 };
+ u8 trace_enable = 0x00;
+ struct sk_buff *skb;
+
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
+ return -EINVAL;
+ }
+
+ if (!(features->page1[0] & 0x3f)) {
+ bt_dev_info(hdev, "Telemetry exception format not supported");
+ return 0;
+ }
+
+ /* The trace must be stopped before writing the DDC event mask. */
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
kfree_skb(skb);
+
+ bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
return 0;
}
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ struct intel_debug_features features;
+ int err;
+
+ bt_dev_dbg(hdev, "enable %d", enable);
+
+ /* Read the Intel supported features and, if the new exception
+ * formats are supported, load the additional DDC config to
+ * enable them.
+ */
+ err = btintel_read_debug_features(hdev, &features);
+ if (err)
+ return err;
+
+ /* Set or reset the debug features. */
+ if (enable)
+ err = btintel_set_debug_features(hdev, &features);
+ else
+ err = btintel_reset_debug_features(hdev, &features);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_quality_report);
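/* A minimal sketch (not part of the patch) of the __hci_cmd_sync() pattern
 * the functions above all follow: on success it returns the command-complete
 * skb, which the caller must free; on failure it returns an ERR_PTR() and
 * there is nothing to free. The helper name is hypothetical.
 */
static int btintel_send_vendor_cmd(struct hci_dev *hdev, u16 opcode,
				   u32 plen, const void *param)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* no skb was allocated for us */

	kfree_skb(skb);			/* done with the completion event */
	return 0;
}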
+
static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -1893,7 +1976,6 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
BT_DBG("%s", hdev->name);
@@ -1934,14 +2016,7 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
btintel_load_ddc_config(hdev, ddcname);
}
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version(hdev, &new_ver);
@@ -2083,13 +2158,102 @@ done:
return err;
}
+static int btintel_get_codec_config_data(struct hci_dev *hdev,
+ __u8 link, struct bt_codec *codec,
+ __u8 *ven_len, __u8 **ven_data)
+{
+ int err = 0;
+
+ if (!ven_data || !ven_len)
+ return -EINVAL;
+
+ *ven_len = 0;
+ *ven_data = NULL;
+
+ if (link != ESCO_LINK) {
+ bt_dev_err(hdev, "Invalid link type(%u)", link);
+ return -EINVAL;
+ }
+
+ *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL);
+ if (!*ven_data) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* supports only CVSD and mSBC offload codecs */
+ switch (codec->id) {
+ case 0x02:
+ **ven_data = 0x00;
+ break;
+ case 0x05:
+ **ven_data = 0x01;
+ break;
+ default:
+ err = -EINVAL;
+ bt_dev_err(hdev, "Invalid codec id(%u)", codec->id);
+ goto error;
+ }
+ /* The codec and its capabilities are pre-defined by preset ids:
+ * preset id = 0x00 represents the CVSD codec with an 8K sampling rate
+ * preset id = 0x01 represents the mSBC codec with a 16K sampling rate
+ */
+ *ven_len = sizeof(__u8);
+ return err;
+
+error:
+ kfree(*ven_data);
+ *ven_data = NULL;
+ return err;
+}
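/* A sketch (not part of the patch) of the id -> preset mapping implemented
 * by the switch above, written as a table; the table name is hypothetical.
 */
static const struct {
	u8 codec_id;	/* HCI coding format */
	u8 preset;	/* Intel vendor preset */
} btintel_codec_presets[] = {
	{ 0x02, 0x00 },	/* CVSD, 8 kHz sampling rate */
	{ 0x05, 0x01 },	/* mSBC, 16 kHz sampling rate */
};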
+
+static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
+{
+ /* Intel uses 1 as the data path id for all the use cases */
+ *data_path_id = 1;
+ return 0;
+}
+
+static int btintel_configure_offload(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err = 0;
+ struct intel_offload_use_cases *use_cases;
+
+ skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading offload use cases failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len < sizeof(*use_cases)) {
+ err = -EIO;
+ goto error;
+ }
+
+ use_cases = (void *)skb->data;
+
+ if (use_cases->status) {
+ err = -bt_to_errno(skb->data[0]);
+ goto error;
+ }
+
+ if (use_cases->preset[0] & 0x03) {
+ hdev->get_data_path_id = btintel_get_data_path_id;
+ hdev->get_codec_config_data = btintel_get_codec_config_data;
+ }
+error:
+ kfree_skb(skb);
+ return err;
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
struct intel_version_tlv new_ver;
bt_dev_dbg(hdev, "");
@@ -2125,14 +2289,10 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
btintel_load_ddc_config(hdev, ddcname);
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ /* Read supported use cases and set callbacks to fetch datapath id */
+ btintel_configure_offload(hdev);
+
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &new_ver);
@@ -2232,6 +2392,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ /* Set up the quality report callback for Intel devices */
+ hdev->set_quality_report = btintel_set_quality_report;
+
/* For Legacy device, check the HW platform value and size */
if (skb->len == sizeof(ver) && skb->data[1] == 0x37) {
bt_dev_dbg(hdev, "Read the legacy Intel version information");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index aa64072bbe68..e500c0d7a729 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -132,6 +132,11 @@ struct intel_debug_features {
__u8 page1[16];
} __packed;
+struct intel_offload_use_cases {
+ __u8 status;
+ __u8 preset[8];
+} __packed;
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
@@ -204,6 +209,7 @@ int btintel_configure_setup(struct hci_dev *hdev);
void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len);
void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len);
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -294,4 +300,9 @@ static inline void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len)
{
}
+
+static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ return -ENODEV;
+}
#endif
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 8b9d78ce6bb2..5ccbe4d459d0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -587,12 +587,12 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
-static bool btmrvl_prevent_wake(struct hci_dev *hdev)
+static bool btmrvl_wakeup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
- return !device_may_wakeup(&card->func->dev);
+ return device_may_wakeup(&card->func->dev);
}
/*
@@ -696,7 +696,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->send = btmrvl_send_frame;
hdev->setup = btmrvl_setup;
hdev->set_bdaddr = btmrvl_set_bdaddr;
- hdev->prevent_wake = btmrvl_prevent_wake;
+ hdev->wakeup = btmrvl_wakeup;
SET_HCIDEV_DEV(hdev, &card->func->dev);
hdev->dev_type = priv->btmrvl_dev.dev_type;
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e9d91d7c0db4..9ba22b13b4fa 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -158,8 +158,10 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
int err;
hlen = sizeof(*hdr) + wmt_params->dlen;
- if (hlen > 255)
- return -EINVAL;
+ if (hlen > 255) {
+ err = -EINVAL;
+ goto err_free_skb;
+ }
hdr = (struct mtk_wmt_hdr *)&wc;
hdr->dir = 1;
@@ -173,7 +175,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
if (err < 0) {
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -190,13 +192,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_skb;
}
/* Parse and handle the return WMT event */
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 8646b6dd11e9..634cf8f5ed2d 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -19,7 +19,6 @@
#include <net/bluetooth/hci_core.h>
#include <asm/unaligned.h>
#include <net/rsi_91x.h>
-#include <net/genetlink.h>
#define RSI_DMA_ALIGN 8
#define RSI_FRAME_DESC_SIZE 16
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 1f8afa0244d8..c2bdd1e6060e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -59,6 +59,7 @@ struct id_table {
__u8 hci_bus;
bool config_needed;
bool has_rom_version;
+ bool has_msft_ext;
char *fw_name;
char *cfg_name;
};
@@ -121,6 +122,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin",
.cfg_name = "rtl_bt/rtl8821c_config" },
@@ -135,6 +137,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8761b_fw.bin",
.cfg_name = "rtl_bt/rtl8761b_config" },
@@ -149,6 +152,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cs_fw.bin",
.cfg_name = "rtl_bt/rtl8822cs_config" },
@@ -156,6 +160,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cu_fw.bin",
.cfg_name = "rtl_bt/rtl8822cu_config" },
@@ -163,6 +168,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin",
.cfg_name = "rtl_bt/rtl8822b_config" },
@@ -170,6 +176,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8852au_fw.bin",
.cfg_name = "rtl_bt/rtl8852au_config" },
};
@@ -594,8 +601,10 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
- if (resp->hci_ver == 0x8 && le16_to_cpu(resp->hci_rev) == 0x826c &&
- resp->lmp_ver == 0x8 && le16_to_cpu(resp->lmp_subver) == 0xa99e)
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
+
+ if (!btrtl_dev->ic_info)
btrtl_dev->drop_fw = true;
if (btrtl_dev->drop_fw) {
@@ -634,13 +643,13 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
+
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
}
out_free:
kfree_skb(skb);
- btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
- hdev->bus);
-
if (!btrtl_dev->ic_info) {
rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
lmp_subver, hci_rev, hci_ver);
@@ -684,12 +693,8 @@ out_free:
/* The following chips support the Microsoft vendor extension,
* therefore set the corresponding VsMsftOpCode.
*/
- switch (lmp_subver) {
- case RTL_ROM_LMP_8822B:
- case RTL_ROM_LMP_8852A:
+ if (btrtl_dev->ic_info->has_msft_ext)
hci_set_msft_opcode(hdev, 0xFCF0);
- break;
- }
return btrtl_dev;
@@ -746,6 +751,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852A:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
break;
default:
rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 60d2fce59a71..75c83768c257 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -384,6 +384,12 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -410,6 +416,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -433,6 +442,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8761B Bluetooth devices */
+ { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -451,10 +464,6 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
- /* Bluetooth component of Realtek 8852AE device */
- { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
- BTUSB_WIDEBAND_SPEECH },
-
{ USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK |
@@ -652,11 +661,33 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
+ struct gpio_desc *reset_gpio = data->reset_gpio;
int err;
if (++data->cmd_timeout_cnt < 5)
return;
+ if (reset_gpio) {
+ bt_dev_err(hdev, "Reset qca device via bt_en gpio");
+
+ /* Toggle the hard reset line. The qca bt device is going to
+ * yank itself off the USB and then replug. The cleanup is handled
+ * correctly on the way out (standard USB disconnect), and the new
+ * device is detected cleanly and bound to the driver again like
+ * it should be.
+ */
+ if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ bt_dev_err(hdev, "last reset failed? Not resetting again");
+ return;
+ }
+
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(200);
+ gpiod_set_value_cansleep(reset_gpio, 1);
+
+ return;
+ }
+
bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device.");
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -2200,6 +2231,23 @@ struct btmtk_section_map {
};
} __packed;
+static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ long ret;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
+ ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void btusb_mtk_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@@ -2804,6 +2852,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
case 0x7668:
fwname = FIRMWARE_MT7668;
break;
+ case 0x7922:
case 0x7961:
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
@@ -3591,11 +3640,11 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf)
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
-static bool btusb_prevent_wake(struct hci_dev *hdev)
+static bool btusb_wakeup(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
- return !device_may_wakeup(&data->udev->dev);
+ return device_may_wakeup(&data->udev->dev);
}
static int btusb_shutdown_qca(struct hci_dev *hdev)
@@ -3752,7 +3801,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
- hdev->prevent_wake = btusb_prevent_wake;
+ hdev->wakeup = btusb_wakeup;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3819,6 +3868,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
hdev->cmd_timeout = btusb_mtk_cmd_timeout;
+ hdev->set_bdaddr = btusb_set_bdaddr_mtk;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
}
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0c0dedece59c..34286ffe0568 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -587,9 +587,11 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
count -= processed;
}
- pm_runtime_get(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
- pm_runtime_put_autosuspend(&hu->serdev->dev);
+ if (hu->serdev) {
+ pm_runtime_get(&hu->serdev->dev);
+ pm_runtime_mark_last_busy(&hu->serdev->dev);
+ pm_runtime_put_autosuspend(&hu->serdev->dev);
+ }
return 0;
}
@@ -814,7 +816,6 @@ static int h5_serdev_probe(struct serdev_device *serdev)
struct device *dev = &serdev->dev;
struct h5 *h5;
const struct h5_device_data *data;
- int err;
h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
if (!h5)
@@ -846,6 +847,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
h5->vnd = data->vnd;
}
+ if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
+ set_bit(H5_WAKEUP_DISABLE, &h5->flags);
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(h5->enable_gpio))
@@ -856,14 +859,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- err = hci_uart_register_device(&h5->serdev_hu, &h5p);
- if (err)
- return err;
-
- if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
- set_bit(H5_WAKEUP_DISABLE, &h5->flags);
-
- return 0;
+ return hci_uart_register_device(&h5->serdev_hu, &h5p);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -962,11 +958,13 @@ static void h5_btrtl_open(struct h5 *h5)
serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
serdev_device_set_baudrate(h5->hu->serdev, 115200);
- pm_runtime_set_active(&h5->hu->serdev->dev);
- pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
- pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
- SUSPEND_TIMEOUT_MS);
- pm_runtime_enable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
+ pm_runtime_set_active(&h5->hu->serdev->dev);
+ pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
+ pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
+ SUSPEND_TIMEOUT_MS);
+ pm_runtime_enable(&h5->hu->serdev->dev);
+ }
/* The controller needs up to 500ms to wakeup */
gpiod_set_value_cansleep(h5->enable_gpio, 1);
@@ -976,7 +974,8 @@ static void h5_btrtl_open(struct h5 *h5)
static void h5_btrtl_close(struct h5 *h5)
{
- pm_runtime_disable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
+ pm_runtime_disable(&h5->hu->serdev->dev);
gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
gpiod_set_value_cansleep(h5->enable_gpio, 0);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 5ed2cfa7da1d..5e32e4d5367a 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -479,6 +479,9 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_DBG("tty %p", tty);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
/* Error if the tty has no write op instead of leaving an exploitable
* hole
*/
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 53deea2eb7b4..dd768a8ed7cb 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1577,7 +1577,7 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
mutex_unlock(&qca->hci_memdump_lock);
}
-static bool qca_prevent_wake(struct hci_dev *hdev)
+static bool qca_wakeup(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
@@ -1730,6 +1730,7 @@ retry:
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
@@ -1764,7 +1765,7 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- hu->hdev->prevent_wake = qca_prevent_wake;
+ hu->hdev->wakeup = qca_wakeup;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 8ab26dec5f6e..b45db0db347c 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -37,6 +38,9 @@ struct vhci_data {
struct mutex open_mutex;
struct delayed_work open_timeout;
+
+ bool suspended;
+ bool wakeup;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -73,6 +77,115 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id)
+{
+ *data_path_id = 0;
+ return 0;
+}
+
+static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data)
+{
+ if (type != ESCO_LINK)
+ return -EINVAL;
+
+ *vnd_len = 0;
+ *vnd_data = NULL;
+ return 0;
+}
+
+static bool vhci_wakeup(struct hci_dev *hdev)
+{
+ struct vhci_data *data = hci_get_drvdata(hdev);
+
+ return data->wakeup;
+}
+
+static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->suspended ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_suspend_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (data->suspended == enable)
+ return -EALREADY;
+
+ if (enable)
+ err = hci_suspend_dev(data->hdev);
+ else
+ err = hci_resume_dev(data->hdev);
+
+ if (err)
+ return err;
+
+ data->suspended = enable;
+
+ return count;
+}
+
+static const struct file_operations force_suspend_fops = {
+ .open = simple_open,
+ .read = force_suspend_read,
+ .write = force_suspend_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t force_wakeup_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->wakeup ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_wakeup_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (data->wakeup == enable)
+ return -EALREADY;
+
+ data->wakeup = enable;
+
+ return count;
+}
+
+static const struct file_operations force_wakeup_fops = {
+ .open = simple_open,
+ .read = force_wakeup_read,
+ .write = force_wakeup_write,
+ .llseek = default_llseek,
+};
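/* A minimal userspace sketch (not part of the patch) of driving the new
 * attribute; the debugfs path assumes the created device came up as hci0.
 * kstrtobool_from_user() accepts "1"/"0", "y"/"n" and "on"/"off".
 */
#include <fcntl.h>
#include <unistd.h>

static int vhci_set_force_wakeup(int on)
{
	int fd, ret;

	fd = open("/sys/kernel/debug/bluetooth/hci0/force_wakeup", O_WRONLY);
	if (fd < 0)
		return -1;

	ret = write(fd, on ? "1" : "0", 1) == 1 ? 0 : -1;
	close(fd);
	return ret;
}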
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -112,6 +225,9 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
+ hdev->get_data_path_id = vhci_get_data_path_id;
+ hdev->get_codec_config_data = vhci_get_codec_config_data;
+ hdev->wakeup = vhci_wakeup;
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -129,6 +245,12 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff);
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 96d0eccca3aa..21f11a5b965b 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1055,14 +1055,16 @@ static const struct net_device_ops ssip_pn_ops = {
static void ssip_pn_setup(struct net_device *dev)
{
+ static const u8 addr = PN_MEDIA_SOS;
+
dev->features = 0;
dev->netdev_ops = &ssip_pn_ops;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = SSIP_DEFAULT_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_SOS;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
dev->tx_queue_len = SSIP_TXQUEUE_LEN;
dev->needs_free_netdev = true;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f367f4a4abff..f3fa2fe6a88a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2275,7 +2275,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ new_smac = ether_addr_to_u64(dev->dev_addr);
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8662f462e2a5..aea4182f33a4 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1853,7 +1853,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
u16 vlan_id, u8 *smac)
{
return _mlx4_set_path(dev, &qp->ah_attr,
- mlx4_mac_to_u64(smac),
+ ether_addr_to_u64(smac),
vlan_id,
path, &mqp->pri, port);
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebc..81147d774dd2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1559,6 +1559,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 755930be01b8..dc203f3d0f25 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -272,7 +272,7 @@ static int qedr_register_device(struct qedr_dev *dev)
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index f0695d68c47e..97f254bdbb16 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -945,8 +945,8 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
dev->broadcast[0] = 0xFF;
/* Set hardware address. */
- dev->dev_addr[0] = aa->s_node;
dev->addr_len = 1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
case SIOCGIFADDR:
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 1f8925e75b3f..388d7b3bd4c2 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -846,9 +846,8 @@ static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
set_30 (dev,ltflags);
dev->broadcast[0] = 0xFF;
- dev->dev_addr[0] = aa->s_node;
-
dev->addr_len=1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 12d085405bd0..8c3ccc7c83cd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -207,7 +207,8 @@ static int __init arcrimi_found(struct net_device *dev)
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
arc_printk(D_NORMAL, dev, "ARCnet RIM I: station %02Xh found at IRQ %d, ShMem %lXh (%ld*%d bytes)\n",
dev->dev_addr[0],
@@ -324,7 +325,7 @@ static int __init arc_rimi_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->mem_start = io;
dev->irq = irq;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 5d4a4c7efbbf..19e996a829c9 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -364,6 +364,11 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev);
void arcnet_timeout(struct net_device *dev, unsigned int txqueue);
+static inline void arcnet_set_addr(struct net_device *dev, u8 addr)
+{
+ dev_addr_set(dev, &addr);
+}
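/* A usage sketch (not part of the patch; the station id is made up): the
 * wrapper exists because dev->dev_addr may no longer be written directly,
 * so the single-byte ARCnet station id has to go through dev_addr_set().
 *
 *	arcnet_set_addr(dev, 0x2a);	// was: dev->dev_addr[0] = 0x2a;
 */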
+
/* I/O equivalents */
#ifdef CONFIG_SA1100_CT6001
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index be618e4b9ed5..293a621e654c 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -151,7 +151,7 @@ static int __init com20020_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->netdev_ops = &com20020_netdev_ops;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 3c8f665c1558..6382e1937cca 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -194,7 +194,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = ioaddr;
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->sysfs_groups[0] = &com20020_state_group;
dev->irq = pdev->irq;
lp->card_name = "PCI COM20020";
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 78043a9c5981..06e1651b594b 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -157,7 +157,7 @@ static int com20020_set_hwaddr(struct net_device *dev, void *addr)
struct arcnet_local *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr;
- memcpy(dev->dev_addr, hwaddr->sa_data, 1);
+ dev_addr_set(dev, hwaddr->sa_data);
com20020_set_subaddress(lp, ioaddr, SUB_NODE);
arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
@@ -220,7 +220,7 @@ int com20020_found(struct net_device *dev, int shared)
/* FIXME: do this some other way! */
if (!dev->dev_addr[0])
- dev->dev_addr[0] = arcnet_inb(ioaddr, 8);
+ arcnet_set_addr(dev, arcnet_inb(ioaddr, 8));
com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index b88a109b3b15..24150c933fcb 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -133,7 +133,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
lp->hw.owner = THIS_MODULE;
/* fill in our module parameters as defaults */
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[0]->end = 16;
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 3856b447d38e..37b47749fc8b 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -252,7 +252,7 @@ static int __init com90io_found(struct net_device *dev)
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = get_buffer_byte(dev, 1);
+ arcnet_set_addr(dev, get_buffer_byte(dev, 1));
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index d8dfb9ea0de8..f49dae194284 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -531,7 +531,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem,
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
dev->base_addr = ioaddr;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 77dc79a7f574..0c52612cb8e9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4414,7 +4414,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
}
/* success */
- memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
+ dev_addr_set(bond_dev, ss->__data);
return 0;
unwind:
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b9e9842fed94..c48b77167fab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -811,8 +811,8 @@ int bond_create_sysfs(struct bond_net *bn)
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
- if (__dev_get_by_name(bn->net,
- class_attr_bonding_masters.attr.name))
+ if (netdev_name_in_use(bn->net,
+ class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index a5f1aa911fe2..7b1457a6e327 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -70,6 +70,7 @@ config NET_DSA_QCA8K
config NET_DSA_REALTEK_SMI
tristate "Realtek SMI Ethernet switch family support"
select NET_DSA_TAG_RTL4_A
+ select NET_DSA_TAG_RTL8_4
select FIXED_PHY
select IRQ_DOMAIN
select REALTEK_PHY
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index f3598c040994..8da1569a34e6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
+realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 604f54112665..06279ba64cc8 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
return;
/* Enable flow control on BCM5301x's CPU port */
- if (is5301x(dev) && port == dev->cpu_port)
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
tx_pause = rx_pause = true;
if (phydev->pause) {
@@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
return;
}
}
- } else if (is5301x(dev)) {
- if (port != dev->cpu_port) {
- b53_force_port_config(dev, dev->cpu_port, 2000,
- DUPLEX_FULL, true, true);
- b53_force_link(dev, dev->cpu_port, 1);
- }
}
/* Re-negotiate EEE if it was enabled already */
@@ -2302,33 +2296,30 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5325_DEVICE_ID,
.dev_name = "BCM5325",
.vlans = 16,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
- .cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5365_DEVICE_ID,
.dev_name = "BCM5365",
.vlans = 256,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
- .cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5389_DEVICE_ID,
.dev_name = "BCM5389",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2338,11 +2329,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5395_DEVICE_ID,
.dev_name = "BCM5395",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2352,11 +2342,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5397_DEVICE_ID,
.dev_name = "BCM5397",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2366,11 +2355,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5398_DEVICE_ID,
.dev_name = "BCM5398",
.vlans = 4096,
- .enabled_ports = 0x7f,
+ .enabled_ports = 0x17f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2380,12 +2368,11 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53115_DEVICE_ID,
.dev_name = "BCM53115",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.vta_regs = B53_VTA_REGS,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
@@ -2394,11 +2381,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53125_DEVICE_ID,
.dev_name = "BCM53125",
.vlans = 4096,
- .enabled_ports = 0xff,
+ .enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2412,7 +2398,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2426,7 +2411,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
@@ -2436,11 +2420,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53010_DEVICE_ID,
.dev_name = "BCM53010",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2454,7 +2437,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2468,7 +2450,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2478,11 +2459,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53018_DEVICE_ID,
.dev_name = "BCM53018",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2492,11 +2472,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53019_DEVICE_ID,
.dev_name = "BCM53019",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2510,7 +2489,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2524,7 +2502,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2539,7 +2516,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
- .cpu_port = 8, /* TODO: ports 4, 5, 8 */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2553,7 +2529,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2567,7 +2542,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2593,7 +2567,6 @@ static int b53_switch_init(struct b53_device *dev)
dev->vta_regs[2] = chip->vta_regs[2];
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
dev->imp_port = chip->imp_port;
- dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
dev->num_arl_buckets = chip->arl_buckets;
@@ -2625,16 +2598,8 @@ static int b53_switch_init(struct b53_device *dev)
break;
#endif
}
- } else if (dev->chip_id == BCM53115_DEVICE_ID) {
- u64 strap_value;
-
- b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
- /* use second IMP port if GMII is enabled */
- if (strap_value & SV_GMII_CTRL_115)
- dev->cpu_port = 5;
}
- dev->enabled_ports |= BIT(dev->cpu_port);
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
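/* Worked example (not part of the patch) of the enabled_ports changes
 * above: the IMP port bit is now baked into the constant instead of being
 * OR'd in at init time, e.g. for BCM5389 with imp_port = 8:
 *
 *	0x01f | BIT(8) == 0x11f		(user ports 0-4 plus IMP port 8)
 *
 * fls(0x11f) == 9, so dev->num_ports still covers the IMP port.
 */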
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 959a52d41f0a..544101e74bca 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -124,7 +124,6 @@ struct b53_device {
/* used ports mask */
u16 enabled_ports;
unsigned int imp_port;
- unsigned int cpu_port;
/* connect specific data */
u8 current_page;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 7578a5c38df5..a86ddc4bb897 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -667,7 +667,9 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
if (priv->int_phy_mask & BIT(port))
return priv->hw_params.gphy_rev;
else
- return 0;
+ return PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
}
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 341236dcbdb4..83808e7dbdda 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -958,8 +958,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
switch_node = dev->of_node;
ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
- dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
return -ENODEV;
}
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index a984f06f6f04..ea7f12778922 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -889,62 +889,183 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
}
static int
-qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv)
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
- struct device_node *port_dn;
- phy_interface_t mode;
- struct dsa_port *dp;
- u32 val;
+ u32 mask = 0;
+ int ret = 0;
- /* CPU port is already checked */
- dp = dsa_to_port(priv->ds, 0);
+ /* SoC specific settings for ipq8064.
+ * If more devices require this, consider adding
+ * a dedicated binding.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064"))
+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+ /* SoC specific settings for ipq8065 */
+ if (of_machine_is_compatible("qcom,ipq8065"))
+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+ if (mask) {
+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+ QCA8K_MAC_PWR_RGMII0_1_8V |
+ QCA8K_MAC_PWR_RGMII1_1_8V,
+ mask);
+ }
+
+ return ret;
+}
- port_dn = dp->dn;
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
- /* Check if port 0 is set to the correct type */
- of_get_phy_mode(port_dn, &mode);
- if (mode != PHY_INTERFACE_MODE_RGMII_ID &&
- mode != PHY_INTERFACE_MODE_RGMII_RXID &&
- mode != PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Find the connected cpu port. Valid ports are 0 and 6 */
+ if (dsa_is_cpu_port(ds, 0))
return 0;
+
+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+ if (dsa_is_cpu_port(ds, 6))
+ return 6;
+
+ return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+ struct device_node *node = priv->dev->of_node;
+ const struct qca8k_match_data *data;
+ u32 val = 0;
+ int ret;
+
+ /* The QCA8327 requires the package mode to be set correctly.
+ * Its bigger brother, the QCA8328, has the 172 pin layout.
+ * This should be applied by default, but set it just to make sure.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ data = of_device_get_match_data(priv->dev);
+
+ /* Set the correct package of 148 pin for QCA8327 */
+ if (data->reduced_package)
+ val |= QCA8327_PWS_PACKAGE148_EN;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+ val);
+ if (ret)
+ return ret;
}
- switch (mode) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val))
- val = 2;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+ val |= QCA8K_PWS_POWER_ON_SEL;
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
+ if (of_property_read_bool(node, "qca,led-open-drain")) {
+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
+ return -EINVAL;
}
- priv->rgmii_rx_delay = val;
- /* Stop here if we need to check only for rx delay */
- if (mode != PHY_INTERFACE_MODE_RGMII_ID)
- break;
+ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+ }
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val))
- val = 1;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ return qca8k_rmw(priv, QCA8K_REG_PWS,
+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+ val);
+}
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
- }
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+ int port, cpu_port_index = -1, ret;
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 delay;
- priv->rgmii_tx_delay = val;
- break;
- default:
- return 0;
+ /* We have 2 CPU ports. Check both of them */
+ for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
+ /* Skip everything but the two possible CPU ports */
+ if (port != 0 && port != 6)
+ continue;
+
+ dp = dsa_to_port(priv->ds, port);
+ port_dn = dp->dn;
+ cpu_port_index++;
+
+ if (!of_device_is_available(port_dn))
+ continue;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ continue;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_SGMII:
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+ /* Skip sgmii parsing for rgmii* mode */
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ break;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+ priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+ priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+ priv->ports_config.sgmii_enable_pll = true;
+
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+ priv->ports_config.sgmii_enable_pll = false;
+ }
+
+ if (priv->switch_revision < 2)
+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+ }
+
+ break;
+ default:
+ continue;
+ }
}
return 0;
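/* A minimal sketch (not part of the patch; helper name hypothetical) of the
 * ps -> ns conversion and clamping done above: the device tree declares the
 * delay in picoseconds while the switch register takes whole nanoseconds in
 * the range 0-3.
 */
static u32 qca8k_delay_ps_to_ns(u32 ps)
{
	u32 ns = ps / 1000;	/* e.g. 2000 ps -> 2 ns */

	return ns > 3 ? 3 : ns;	/* clamp to QCA8K_MAX_DELAY */
}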
@@ -954,15 +1075,20 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i;
+ int cpu_port, ret, i;
u32 mask;
- /* Make sure that port 0 is the cpu port */
- if (!dsa_is_cpu_port(ds, 0)) {
- dev_err(priv->dev, "port 0 is not the CPU port");
- return -EINVAL;
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
}
+ /* Parse CPU port config to be used later in phylink mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
mutex_init(&priv->reg_mutex);
/* Start by setting up the register mapping */
@@ -975,7 +1101,11 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ret = qca8k_setup_of_rgmii_delay(priv);
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
if (ret)
return ret;
@@ -992,41 +1122,49 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
dev_warn(priv->dev, "mib init failed");
- /* Enable QCA header mode on the cpu port */
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
-
- /* Disable forwarding by default on all ports */
+ /* Initial setup of all ports */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
- }
- /* Disable MAC by default on all ports */
- for (i = 1; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
- /* Forward all unknown frames to CPU port for Linux processing */
+ /* Forward all unknown frames to CPU port for Linux processing.
+ * Notice that in a multi-cpu config only one port should be set
+ * as the destination for igmp, unknown, multicast and broadcast
+ * packets.
+ */
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
if (ret)
return ret;
- /* Setup connection between CPU port & user ports */
+ /* Setup connection between CPU port & user ports
+ * Apply port specific switch configuration as well.
+ */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
if (ret)
return ret;
@@ -1038,7 +1176,7 @@ qca8k_setup(struct dsa_switch *ds)
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER,
- BIT(QCA8K_CPU_PORT));
+ BIT(cpu_port));
if (ret)
return ret;
@@ -1063,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
}
- }
- /* The port 5 of the qca8337 have some problem in flood condition. The
- * original legacy driver had some specific buffer and priority settings
- * for the different port suggested by the QCA switch team. Add this
- * missing settings to improve switch stability under load condition.
- * This problem is limited to qca8337 and other qca8k switch are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Port 5 of the qca8337 has some problems under flood conditions. The
+ * original legacy driver had some specific buffer and priority settings
+ * for the different ports suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load conditions.
+ * This problem is limited to the qca8337; other qca8k switches are not
+ * affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
switch (i) {
/* The 2 CPU port and port 5 requires some different
* priority than any other ports.
@@ -1108,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
+
+ /* Set initial MTU for every port.
+ * We only have a general MTU setting, so track
+ * every port and set the max across all ports.
+ */
+ priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -1121,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds)
}
/* Setup our port MTUs to match power on defaults */
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
dev_warn(priv->dev, "failed setting MTU settings");
@@ -1137,12 +1277,53 @@ qca8k_setup(struct dsa_switch *ds)
}
static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+ u32 reg)
+{
+ u32 delay, val = 0;
+ int ret;
+
+ /* Delay can be declared in 3 different ways:
+ * - phy mode set to rgmii with the standard internal-delay
+ * binding defined,
+ * - rgmii-id phy mode set,
+ * - rgmii-txid/rgmii-rxid phy mode set.
+ * The parse logic sets a delay different from 0 only when one
+ * of these 3 ways is used. In all other cases the delay is not
+ * enabled. With ID or TX/RXID the delay is enabled and set to
+ * the default, recommended value.
+ */
+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+ }
+
+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+ }
+
+ /* Set RGMII delay based on the selected values */
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+ val);
+ if (ret)
+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
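+
+/* Worked example (illustrative values, not from this patch): with
+ * rgmii_tx_delay = 1 and rgmii_rx_delay = 2 for a CPU port, the function
+ * above builds
+ *
+ * val = QCA8K_PORT_PAD_RGMII_TX_DELAY(1) | QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ * QCA8K_PORT_PAD_RGMII_RX_DELAY(2) | QCA8K_PORT_PAD_RGMII_RX_DELAY_EN
+ *
+ * i.e. bits 23:22 = 0b01 with bit 25 set and bits 21:20 = 0b10 with bit 24
+ * set, and read-modify-writes only those fields of the port pad register.
+ */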
+
+static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
+ int cpu_port_index, ret;
u32 reg, val;
- int ret;
switch (port) {
case 0: /* 1st CPU port */
@@ -1154,6 +1335,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
break;
case 1:
case 2:
@@ -1172,6 +1354,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
@@ -1186,23 +1369,18 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
- /* RGMII mode means no delay so don't enable the delay */
- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
- break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* RGMII_ID needs internal delay. This is enabled through
- * PORT5_PAD_CTRL for all ports, rather than individual port
- * registers
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+ /* Configure rgmii delay */
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+ /* The QCA8337 requires the rgmii rx delay to be set for all ports.
+ * This is enabled through PORT5_PAD_CTRL for all ports,
+ * rather than individual port registers.
*/
- qca8k_write(priv, reg,
- QCA8K_PORT_PAD_RGMII_EN |
- QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
- /* QCA8337 requires to set rgmii rx delay */
if (priv->switch_id == QCA8K_ID_QCA8337)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
@@ -1227,8 +1405,11 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
if (ret)
return;
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
if (dsa_is_cpu_port(ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
@@ -1243,6 +1424,35 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+ /* For qca8327/qca8328/qca8334/qca8338 the SGMII port is unique
+ * and the falling edge is set by writing to the PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ /* The original code reports port instability when SGMII is used
+ * without a delay, as SGMII also requires the delay to be set.
+ * Apply the advised values here, or take them from DT.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
@@ -1522,10 +1732,15 @@ static int
qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask = BIT(QCA8K_CPU_PORT);
+ int port_mask, cpu_port;
int i, ret;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
@@ -1551,9 +1766,13 @@ static void
qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int i;
+ int cpu_port, i;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Remove this port from the portvlan mask of the other ports
@@ -1568,7 +1787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
* this port
*/
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
static int
@@ -1939,7 +2158,12 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
-static const struct qca8k_match_data qca832x = {
+static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
+};
+
+static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
};
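+/* Both compatibles share the QCA8327 switch id; the qca8327 entry selects
+ * the reduced 148-pin package (see QCA8327_PWS_PACKAGE148_EN in qca8k.h)
+ * while the qca8328 entry keeps the default 176-pin package.
+ */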
@@ -1948,7 +2172,8 @@ static const struct qca8k_match_data qca833x = {
};
static const struct of_device_id qca8k_of_match[] = {
- { .compatible = "qca,qca8327", .data = &qca832x },
+ { .compatible = "qca,qca8327", .data = &qca8327 },
+ { .compatible = "qca,qca8328", .data = &qca8328 },
{ .compatible = "qca,qca8334", .data = &qca833x },
{ .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index ed3b05ad6745..e10571a398c9 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -13,6 +13,7 @@
#include <linux/gpio.h>
#define QCA8K_NUM_PORTS 7
+#define QCA8K_NUM_CPU_PORTS 2
#define QCA8K_MAX_MTU 9000
#define PHY_ID_QCA8327 0x004dd034
@@ -24,8 +25,6 @@
#define QCA8K_NUM_FDB_RECORDS 2048
-#define QCA8K_CPU_PORT 0
-
#define QCA8K_PORT_VID_DEF 1
/* Global control registers */
@@ -35,16 +34,26 @@
#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
#define QCA8K_REG_PORT5_PAD_CTRL 0x008
#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_PWS 0x010
+#define QCA8K_PWS_POWER_ON_SEL BIT(31)
+/* This reg is only valid for QCA832x and toggles the package
+ * type from 176 pin (the default) to the 148 pin used on QCA8327
+ */
+#define QCA8327_PWS_PACKAGE148_EN BIT(30)
+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
@@ -100,6 +109,11 @@
#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
+/* MAC_PWR_SEL registers */
+#define QCA8K_REG_MAC_PWR_SEL 0x0e4
+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
+
/* EEE control registers */
#define QCA8K_REG_EEE_CTRL 0x100
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
@@ -248,14 +262,27 @@ struct ar8xxx_port_status {
struct qca8k_match_data {
u8 id;
+ bool reduced_package;
+};
+
+enum {
+ QCA8K_CPU_PORT0,
+ QCA8K_CPU_PORT6,
+};
+
+struct qca8k_ports_config {
+ bool sgmii_rx_clk_falling_edge;
+ bool sgmii_tx_clk_falling_edge;
+ bool sgmii_enable_pll;
+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
- u8 rgmii_tx_delay;
- u8 rgmii_rx_delay;
bool legacy_phy_port_mapping;
+ struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index 2fcfd917b876..c66ebd0ee217 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -501,6 +501,10 @@ static const struct of_device_id realtek_smi_of_match[] = {
.compatible = "realtek,rtl8366s",
.data = NULL,
},
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index fcf465f7f922..5bfa53e2480a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
-int rtl8366_init_vlan(struct realtek_smi *smi);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -143,5 +140,6 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
extern const struct realtek_smi_variant rtl8366rb_variant;
+extern const struct realtek_smi_variant rtl8365mb_variant;
#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
new file mode 100644
index 000000000000..baaae97283c5
--- /dev/null
+++ b/drivers/net/dsa/rtl8365mb.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
+ *
+ * Copyright (C) 2021 Alvin Šipraga <alsi@bang-olufsen.dk>
+ * Copyright (C) 2021 Michael Rasmussen <mir@bang-olufsen.dk>
+ *
+ * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
+ * integrated PHYs for the user facing ports, and an extension interface which
+ * can be connected to the CPU - or another PHY - via either MII, RMII, or
+ * RGMII. The switch is configured via the Realtek Simple Management Interface
+ * (SMI), which uses the MDIO/MDC lines.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * .-----------------------------------.
+ * | |
+ * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
+ * | |
+ * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
+ * | interface 1 GMAC 1 |
+ * | |
+ * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
+ * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
+ * | ~RTL8365MB ~~~ |
+ * | ~GXXXC TAIWAN~ |
+ * GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
+ * | |
+ * Interrupt <----------> Link UP/DOWN events |
+ * controller | |
+ * '-----------------------------------'
+ *
+ * The driver uses DSA to integrate the 4 user and 1 extension ports into the
+ * kernel. Netdevices are created for the user ports, as are PHY devices for
+ * their integrated PHYs. The device tree firmware should also specify the link
+ * partner of the extension port - either via a fixed-link or other phy-handle.
+ * See the device tree bindings for more detailed information. Note that the
+ * driver has only been tested with a fixed-link, but in principle it should not
+ * matter.
+ *
+ * NOTE: Currently, only the RGMII interface is implemented in this driver.
+ *
+ * The interrupt line is asserted on link UP/DOWN events. The driver creates a
+ * custom irqchip to handle this interrupt and demultiplex the events by reading
+ * the status registers via SMI. Interrupts are then propagated to the relevant
+ * PHY device.
+ *
+ * The EEPROM contains initial register values which the chip will read over I2C
+ * upon hardware reset. It is also possible to omit the EEPROM. In both cases,
+ * the driver will manually reprogram some registers using jam tables to reach
+ * an initial state defined by the vendor driver.
+ *
+ * This Linux driver is written based on an OS-agnostic vendor driver from
+ * Realtek. The reference GPL-licensed sources can be found in the OpenWrt
+ * source tree under the name rtl8367c. The vendor driver claims to support a
+ * number of similar switch controllers from Realtek, but the only hardware we
+ * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
+ * the name RTL8367C. Although one wishes that the 'C' stood for some kind of
+ * common hardware revision, there exist examples of chips with the suffix -VC
+ * which are explicitly not supported by the rtl8367c driver and which instead
+ * require the rtl8367d vendor driver. With all this uncertainty, the driver has
+ * been modestly named rtl8365mb. Future implementors may wish to rename things
+ * accordingly.
+ *
+ * In the same family of chips, some carry up to 8 user ports and up to 2
+ * extension ports. Where possible this driver tries to make things generic, but
+ * more work must be done to support these configurations. According to
+ * documentation from Realtek, the family should include the following chips:
+ *
+ * - RTL8363NB
+ * - RTL8363NB-VB
+ * - RTL8363SC
+ * - RTL8363SC-VB
+ * - RTL8364NB
+ * - RTL8364NB-VB
+ * - RTL8365MB-VC
+ * - RTL8366SC
+ * - RTL8367RB-VB
+ * - RTL8367SB
+ * - RTL8367S
+ * - RTL8370MB
+ * - RTL8310SR
+ *
+ * Some of the register logic for these additional chips has been skipped over
+ * while implementing this driver. It is therefore not possible to assume that
+ * things will work out-of-the-box for other chips, and a careful review of the
+ * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
+ * one of the simpler chips.
+ */
+
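+/* Illustrative device tree fragment (hypothetical labels, not taken from
+ * this patch) wiring the extension port as a fixed-link, as described
+ * above:
+ *
+ * port@6 {
+ * reg = <6>;
+ * ethernet = <&gmac0>;
+ * phy-mode = "rgmii";
+ * tx-internal-delay-ps = <2000>;
+ * fixed-link {
+ * speed = <1000>;
+ * full-duplex;
+ * };
+ * };
+ */
+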
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/if_bridge.h>
+
+#include "realtek-smi-core.h"
+
+/* Chip-specific data and limits */
+#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
+#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
+#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+
+/* Family-specific data and limits */
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+
+/* Chip identification registers */
+#define RTL8365MB_CHIP_ID_REG 0x1300
+
+#define RTL8365MB_CHIP_VER_REG 0x1301
+
+#define RTL8365MB_MAGIC_REG 0x13C2
+#define RTL8365MB_MAGIC_VALUE 0x0249
+
+/* Chip reset register */
+#define RTL8365MB_CHIP_RESET_REG 0x1322
+#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
+#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
+
+/* Interrupt polarity register */
+#define RTL8365MB_INTR_POLARITY_REG 0x1100
+#define RTL8365MB_INTR_POLARITY_MASK 0x0001
+#define RTL8365MB_INTR_POLARITY_HIGH 0
+#define RTL8365MB_INTR_POLARITY_LOW 1
+
+/* Interrupt control/status register - enable/check specific interrupt types */
+#define RTL8365MB_INTR_CTRL_REG 0x1101
+#define RTL8365MB_INTR_STATUS_REG 0x1102
+#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
+#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
+#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
+#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
+#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
+#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
+#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
+#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
+#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
+#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
+#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
+#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
+#define RTL8365MB_INTR_ALL_MASK \
+ (RTL8365MB_INTR_SLIENT_START_2_MASK | \
+ RTL8365MB_INTR_SLIENT_START_MASK | \
+ RTL8365MB_INTR_ACL_ACTION_MASK | \
+ RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
+ RTL8365MB_INTR_INTERRUPT_8051_MASK | \
+ RTL8365MB_INTR_LOOP_DETECTION_MASK | \
+ RTL8365MB_INTR_GREEN_TIMER_MASK | \
+ RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
+ RTL8365MB_INTR_SPEED_CHANGE_MASK | \
+ RTL8365MB_INTR_LEARN_OVER_MASK | \
+ RTL8365MB_INTR_METER_EXCEEDED_MASK | \
+ RTL8365MB_INTR_LINK_CHANGE_MASK)
+
+/* Per-port interrupt type status registers */
+#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
+#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
+
+#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
+#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
+
+/* PHY indirect access registers */
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
+#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
+#define RTL8365MB_PHY_BASE 0x2000
+#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
+#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
+
+/* PHY OCP address prefix register */
+#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
+#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
+#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
+
+/* The PHY OCP addresses of PHY registers 0~31 start here */
+#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
+
+/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
+#define RTL8365MB_EXT_PORT_MODE_RGMII 1
+#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
+#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
+#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
+#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
+#define RTL8365MB_EXT_PORT_MODE_GMII 6
+#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
+#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
+#define RTL8365MB_EXT_PORT_MODE_SGMII 9
+#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
+#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
+#define RTL8365MB_EXT_PORT_MODE_1000X 12
+#define RTL8365MB_EXT_PORT_MODE_100FX 13
+
+/* EXT port interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
+ ((_extport) >> 1) * (0x13C3 - 0x1305))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
+ (0xF << (((_extport) % 2)))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
+ (((_extport) % 2) * 4)
+
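+/* For example, SELECT_REG(0) and SELECT_REG(1) both resolve to register
+ * 0x1305 while SELECT_REG(2)/SELECT_REG(3) resolve to 0x13C3; each
+ * register packs two 4-bit mode fields, picked out by the offset macro.
+ */
+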
+/* EXT port RGMII TX/RX delay configuration registers 1~2 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
+#define RTL8365MB_EXT_RGMXF_REG(_extport) \
+ (RTL8365MB_EXT_RGMXF_REG1 + \
+ (((_extport) >> 1) * (0x13C5 - 0x1307)))
+#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
+#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
+
+/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+#define RTL8365MB_PORT_SPEED_10M 0
+#define RTL8365MB_PORT_SPEED_100M 1
+#define RTL8365MB_PORT_SPEED_1000M 2
+
+/* EXT port force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
+ ((_extport) & 0x1) + \
+ ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
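+/* For example, FORCE_REG(0) = 0x1310, FORCE_REG(1) = 0x1311 and
+ * FORCE_REG(2) = 0x13C4.
+ */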
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
+
+/* CPU port mask register - controls which ports are treated as CPU ports */
+#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
+#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
+
+/* CPU control register */
+#define RTL8365MB_CPU_CTRL_REG 0x121A
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
+#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
+#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
+#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
+#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
+#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
+
+/* Maximum packet length register */
+#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
+#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
+
+/* Port learning limit registers */
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
+ (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
+
+/* Port isolation (forwarding mask) registers */
+#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
+#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
+ (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
+#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
+
+/* MSTP port state registers - indexed by MSTI (tree instance) */
+#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
+#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
+ (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
+ (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
+
+/* MIB counter value registers */
+#define RTL8365MB_MIB_COUNTER_BASE 0x1000
+#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
+
+/* MIB counter address register */
+#define RTL8365MB_MIB_ADDRESS_REG 0x1004
+#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
+#define RTL8365MB_MIB_ADDRESS(_p, _x) \
+ (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
+
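+/* For example, port 2 / byte offset 8 yields the SRAM word address
+ * (0x7C * 2 + 8) >> 2 = 0x40 (the >> 2 presumably converts the byte
+ * offset into a 4-byte word address).
+ */
+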
+#define RTL8365MB_MIB_CTRL0_REG 0x1005
+#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
+#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
+
+/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
+ * to block. On the other hand, accessing MIB counters absolutely requires us to
+ * block. The solution is thus to schedule work which polls the MIB counters
+ * asynchronously and updates some private data, which the callback can then
+ * fetch atomically. Three seconds should be a good enough polling interval.
+ */
+#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
+
+enum rtl8365mb_mib_counter_index {
+ RTL8365MB_MIB_ifInOctets,
+ RTL8365MB_MIB_dot3StatsFCSErrors,
+ RTL8365MB_MIB_dot3StatsSymbolErrors,
+ RTL8365MB_MIB_dot3InPauseFrames,
+ RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
+ RTL8365MB_MIB_etherStatsFragments,
+ RTL8365MB_MIB_etherStatsJabbers,
+ RTL8365MB_MIB_ifInUcastPkts,
+ RTL8365MB_MIB_etherStatsDropEvents,
+ RTL8365MB_MIB_ifInMulticastPkts,
+ RTL8365MB_MIB_ifInBroadcastPkts,
+ RTL8365MB_MIB_inMldChecksumError,
+ RTL8365MB_MIB_inIgmpChecksumError,
+ RTL8365MB_MIB_inMldSpecificQuery,
+ RTL8365MB_MIB_inMldGeneralQuery,
+ RTL8365MB_MIB_inIgmpSpecificQuery,
+ RTL8365MB_MIB_inIgmpGeneralQuery,
+ RTL8365MB_MIB_inMldLeaves,
+ RTL8365MB_MIB_inIgmpLeaves,
+ RTL8365MB_MIB_etherStatsOctets,
+ RTL8365MB_MIB_etherStatsUnderSizePkts,
+ RTL8365MB_MIB_etherOversizeStats,
+ RTL8365MB_MIB_etherStatsPkts64Octets,
+ RTL8365MB_MIB_etherStatsPkts65to127Octets,
+ RTL8365MB_MIB_etherStatsPkts128to255Octets,
+ RTL8365MB_MIB_etherStatsPkts256to511Octets,
+ RTL8365MB_MIB_etherStatsPkts512to1023Octets,
+ RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
+ RTL8365MB_MIB_ifOutOctets,
+ RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsDeferredTransmissions,
+ RTL8365MB_MIB_dot3StatsLateCollisions,
+ RTL8365MB_MIB_etherStatsCollisions,
+ RTL8365MB_MIB_dot3StatsExcessiveCollisions,
+ RTL8365MB_MIB_dot3OutPauseFrames,
+ RTL8365MB_MIB_ifOutDiscards,
+ RTL8365MB_MIB_dot1dTpPortInDiscards,
+ RTL8365MB_MIB_ifOutUcastPkts,
+ RTL8365MB_MIB_ifOutMulticastPkts,
+ RTL8365MB_MIB_ifOutBroadcastPkts,
+ RTL8365MB_MIB_outOampduPkts,
+ RTL8365MB_MIB_inOampduPkts,
+ RTL8365MB_MIB_inIgmpJoinsSuccess,
+ RTL8365MB_MIB_inIgmpJoinsFail,
+ RTL8365MB_MIB_inMldJoinsSuccess,
+ RTL8365MB_MIB_inMldJoinsFail,
+ RTL8365MB_MIB_inReportSuppressionDrop,
+ RTL8365MB_MIB_inLeaveSuppressionDrop,
+ RTL8365MB_MIB_outIgmpReports,
+ RTL8365MB_MIB_outIgmpLeaves,
+ RTL8365MB_MIB_outIgmpGeneralQuery,
+ RTL8365MB_MIB_outIgmpSpecificQuery,
+ RTL8365MB_MIB_outMldReports,
+ RTL8365MB_MIB_outMldLeaves,
+ RTL8365MB_MIB_outMldGeneralQuery,
+ RTL8365MB_MIB_outMldSpecificQuery,
+ RTL8365MB_MIB_inKnownMulticastPkts,
+ RTL8365MB_MIB_END,
+};
+
+struct rtl8365mb_mib_counter {
+ u32 offset;
+ u32 length;
+ const char *name;
+};
+
+#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
+ [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
+
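+/* As an example, RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets) expands to
+ * [RTL8365MB_MIB_ifInOctets] = { 0, 4, "ifInOctets" }, keeping the table
+ * below indexed by enum rtl8365mb_mib_counter_index.
+ */
+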
+static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
+ RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
+ RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
+ RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
+ RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
+ RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
+ RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
+ RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
+ RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
+ RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
+ RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
+};
+
+static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
+
+struct rtl8365mb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
+
+/* Lifted from the vendor driver sources */
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
+ { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
+ { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
+ { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
+ { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
+ { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
+ { 0x13F0, 0x0000 },
+};
+
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
+ { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
+ { 0x03Fa, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
+ { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
+ { 0x1D32, 0x0002 },
+};
+
+enum rtl8365mb_stp_state {
+ RTL8365MB_STP_STATE_DISABLED = 0,
+ RTL8365MB_STP_STATE_BLOCKING = 1,
+ RTL8365MB_STP_STATE_LEARNING = 2,
+ RTL8365MB_STP_STATE_FORWARDING = 3,
+};
+
+enum rtl8365mb_cpu_insert {
+ RTL8365MB_CPU_INSERT_TO_ALL = 0,
+ RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
+ RTL8365MB_CPU_INSERT_TO_NONE = 2,
+};
+
+enum rtl8365mb_cpu_position {
+ RTL8365MB_CPU_POS_AFTER_SA = 0,
+ RTL8365MB_CPU_POS_BEFORE_CRC = 1,
+};
+
+enum rtl8365mb_cpu_format {
+ RTL8365MB_CPU_FORMAT_8BYTES = 0,
+ RTL8365MB_CPU_FORMAT_4BYTES = 1,
+};
+
+enum rtl8365mb_cpu_rxlen {
+ RTL8365MB_CPU_RXLEN_72BYTES = 0,
+ RTL8365MB_CPU_RXLEN_64BYTES = 1,
+};
+
+/**
+ * struct rtl8365mb_cpu - CPU port configuration
+ * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
+ * @mask: port mask of ports that should parse CPU tags
+ * @trap_port: forward trapped frames to this port
+ * @insert: CPU tag insertion mode in switch->CPU frames
+ * @position: position of CPU tag in frame
+ * @rx_length: minimum CPU RX length
+ * @format: CPU tag format
+ *
+ * Represents the CPU tagging and CPU port configuration of the switch. These
+ * settings are configurable at runtime.
+ */
+struct rtl8365mb_cpu {
+ bool enable;
+ u32 mask;
+ u32 trap_port;
+ enum rtl8365mb_cpu_insert insert;
+ enum rtl8365mb_cpu_position position;
+ enum rtl8365mb_cpu_rxlen rx_length;
+ enum rtl8365mb_cpu_format format;
+};
+
+/**
+ * struct rtl8365mb_port - private per-port data
+ * @smi: pointer to parent realtek_smi data
+ * @index: DSA port index, same as dsa_port::index
+ * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
+ * access via rtl8365mb_get_stats64
+ * @stats_lock: protect the stats structure during read/update
+ * @mib_work: delayed work for polling MIB counters
+ */
+struct rtl8365mb_port {
+ struct realtek_smi *smi;
+ unsigned int index;
+ struct rtnl_link_stats64 stats;
+ spinlock_t stats_lock;
+ struct delayed_work mib_work;
+};
+
+/**
+ * struct rtl8365mb - private chip-specific driver data
+ * @smi: pointer to parent realtek_smi data
+ * @irq: registered IRQ or zero
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @port_mask: mask of all ports
+ * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @cpu: CPU tagging and CPU port configuration for this chip
+ * @mib_lock: prevent concurrent reads of MIB counters
+ * @ports: per-port data
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * Private data for this driver.
+ */
+struct rtl8365mb {
+ struct realtek_smi *smi;
+ int irq;
+ u32 chip_id;
+ u32 chip_ver;
+ u32 port_mask;
+ u32 learn_limit_max;
+ struct rtl8365mb_cpu cpu;
+ struct mutex mib_lock;
+ struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(smi->map,
+ RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
+ val, !val, 10, 100);
+}
+
+static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+ u32 ocp_addr)
+{
+ u32 val;
+ int ret;
+
+ /* Set OCP prefix */
+ val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
+ FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Set PHY register address */
+ val = RTL8365MB_PHY_BASE;
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
+ ocp_addr >> 1);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
+ ocp_addr >> 6);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 *data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Execute read operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ /* Get PHY register data */
+ ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
+ &val);
+ if (ret)
+ return ret;
+
+ *data = val & 0xFFFF;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Set PHY register data */
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
+ data);
+ if (ret)
+ return ret;
+
+ /* Execute write operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+{
+ u32 ocp_addr;
+ u16 val;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return val;
+}
+
+static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+ u16 val)
+{
+ u32 ocp_addr;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RTL8_4;
+}
+
+static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+ phy_interface_t interface)
+{
+ struct device_node *dn;
+ struct dsa_port *dp;
+ int tx_delay = 0;
+ int rx_delay = 0;
+ int ext_port;
+ u32 val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ dp = dsa_to_port(smi->ds, port);
+ dn = dp->dn;
+
+ /* Set the RGMII TX/RX delay
+ *
+ * The Realtek vendor driver indicates the following possible
+ * configuration settings:
+ *
+ * TX delay:
+ * 0 = no delay, 1 = 2 ns delay
+ * RX delay:
+ * 0 = no delay, 7 = maximum delay
+ * No units are specified, but there are a total of 8 steps.
+ *
+ * The vendor driver also states that this must be configured *before*
+ * forcing the external interface into a particular mode, which is done
+ * in the rtl8365mb_phylink_mac_link_{up,down} functions.
+ *
+ * Only configure an RGMII TX (resp. RX) delay if the
+ * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
+ * specified. We ignore the detail of the RGMII interface mode
+ * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
+ * property.
+ *
+ * For the RX delay, we assume that a register value of 4 corresponds to
+ * 2 ns. But this is just an educated guess, so ignore all other values
+ * to avoid too much confusion.
+ */
+ if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ tx_delay = val / 2;
+ else
+ dev_warn(smi->dev,
+ "EXT port TX delay must be 0 or 2 ns\n");
+ }
+
+ if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ rx_delay = val * 2;
+ else
+ dev_warn(smi->dev,
+ "EXT port RX delay must be 0 to 2 ns\n");
+ }
+
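+ /* For example, tx-internal-delay-ps = <2000> gives val = 2 ns and
+ * hence tx_delay = 1, while rx-internal-delay-ps = <2000> gives
+ * rx_delay = 4 under the educated guess above.
+ */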
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
+ RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ RTL8365MB_EXT_PORT_MODE_RGMII
+ << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
+ ext_port));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+ bool link, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u32 r_tx_pause;
+ u32 r_rx_pause;
+ u32 r_duplex;
+ u32 r_speed;
+ u32 r_link;
+ int ext_port;
+ int val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ if (link) {
+ /* Force the link up with the desired configuration */
+ r_link = 1;
+ r_rx_pause = rx_pause ? 1 : 0;
+ r_tx_pause = tx_pause ? 1 : 0;
+
+ if (speed == SPEED_1000) {
+ r_speed = RTL8365MB_PORT_SPEED_1000M;
+ } else if (speed == SPEED_100) {
+ r_speed = RTL8365MB_PORT_SPEED_100M;
+ } else if (speed == SPEED_10) {
+ r_speed = RTL8365MB_PORT_SPEED_10M;
+ } else {
+ dev_err(smi->dev, "unsupported port speed %s\n",
+ phy_speed_to_str(speed));
+ return -EINVAL;
+ }
+
+ if (duplex == DUPLEX_FULL) {
+ r_duplex = 1;
+ } else if (duplex == DUPLEX_HALF) {
+ r_duplex = 0;
+ } else {
+ dev_err(smi->dev, "unsupported duplex %s\n",
+ phy_duplex_to_str(duplex));
+ return -EINVAL;
+ }
+ } else {
+ /* Force the link down and reset any programmed configuration */
+ r_link = 0;
+ r_tx_pause = 0;
+ r_rx_pause = 0;
+ r_speed = 0;
+ r_duplex = 0;
+ }
+
+ val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
+ r_tx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
+ r_rx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
+ r_duplex) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
+ ret = regmap_write(smi->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ if (dsa_is_user_port(ds, port) &&
+ (interface == PHY_INTERFACE_MODE_NA ||
+ interface == PHY_INTERFACE_MODE_INTERNAL))
+ /* Internal PHY */
+ return true;
+ else if (dsa_is_cpu_port(ds, port) &&
+ phy_interface_mode_is_rgmii(interface))
+ /* Extension MAC */
+ return true;
+
+ return false;
+}
+
+static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
+
+ /* include/linux/phylink.h says:
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
+ * expects the MAC driver to return all supported link modes.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ return;
+ }
+
+ if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
+ dev_err(smi->dev,
+ "port %d supports only conventional PHY or fixed-link\n",
+ port);
+ return;
+ }
+
+ if (phy_interface_mode_is_rgmii(state->interface)) {
+ ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to configure RGMII mode on port %d: %d\n",
+ port, ret);
+ return;
+ }
+
+ /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
+ * supports
+ */
+}
+
+static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ cancel_delayed_work_sync(&p->mib_work);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ false, false);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to reset forced mode on port %d: %d\n",
+ port, ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ schedule_delayed_work(&p->mib_work, 0);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ duplex, tx_pause,
+ rx_pause);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to force mode on port %d: %d\n", port,
+ ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ enum rtl8365mb_stp_state val;
+ int msti = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8365MB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8365MB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8365MB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8365MB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "invalid STP state: %u\n", state);
+ return;
+ }
+
+ regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
+ val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
+}
+
+static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+ bool enable)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+
+ /* Enable/disable learning by limiting the number of L2 addresses the
+ * port can learn. Realtek documentation states that a limit of zero
+ * disables learning. When enabling learning, set it to the chip's
+ * maximum.
+ */
+ return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? mb->learn_limit_max : 0);
+}
+
+static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+ u32 mask)
+{
+ return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+}
+
+static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+ u32 offset, u32 length, u64 *mibvalue)
+{
+ u64 tmpvalue = 0;
+ u32 val;
+ int ret;
+ int i;
+
+ /* The MIB address is an SRAM address. We request a particular address
+ * and then poll the control register for completion before reading the
+ * value from the counter registers.
+ */
+ ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ RTL8365MB_MIB_ADDRESS(port, offset));
+ if (ret)
+ return ret;
+
+ /* Poll for completion */
+ ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
+ 10, 100);
+ if (ret)
+ return ret;
+
+ /* Presumably this indicates a MIB counter read failure */
+ if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
+ return -EIO;
+
+ /* There are four MIB counter registers each holding a 16 bit word of a
+ * MIB counter. Depending on the offset, we should read from the upper
+ * two or lower two registers. In case the MIB counter is 4 words, we
+ * read from all four registers.
+ */
+ if (length == 4)
+ offset = 3;
+ else
+ offset = (offset + 1) % 4;
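+
+ /* Illustrative example: a 2-word counter at byte offset 4 ends at
+ * register offset (4 + 1) % 4 = 1, so the loop below reads
+ * MIB_COUNTER_REG(1) then MIB_COUNTER_REG(0), most significant
+ * word first.
+ */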
+
+ /* Read the MIB counter 16 bits at a time */
+ for (i = 0; i < length; i++) {
+ ret = regmap_read(smi->map,
+ RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
+ if (ret)
+ return ret;
+
+ tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF);
+ }
+
+ /* Only commit the result if no error occurred */
+ *mibvalue = tmpvalue;
+
+ return 0;
+}
+
+static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &data[i]);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read port %d counters: %d\n", port,
+ ret);
+ break;
+ }
+ }
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return RTL8365MB_MIB_END;
+}
+
+static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &phy_stats->SymbolErrorDuringCarrier);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3OutPauseFrames] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3InPauseFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
+
+ };
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* The RTL8365MB-VC exposes MIB objects, which we have to translate into
+ * IEEE 802.3 Managed Objects. This is not always completely faithful,
+ * but we try our best. See RFC 3635 for a detailed treatment of the
+ * subject.
+ */
+
+ mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+ mac_stats->SingleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
+ mac_stats->MultipleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
+ mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3InPauseFrames];
+ mac_stats->FrameCheckSequenceErrors =
+ cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
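+ /* The 18 octets subtracted per frame are the 14-byte MAC header
+ * plus the 4-byte FCS: the IEEE Octets{Transmitted,Received}OK
+ * objects count only data and padding, while if{In,Out}Octets
+ * count whole frames.
+ */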
+ mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
+ 18 * mac_stats->FramesTransmittedOK;
+ mac_stats->FramesWithDeferredXmissions =
+ cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
+ mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ mac_stats->FramesAbortedDueToXSColls =
+ cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
+ mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
+ 18 * mac_stats->FramesReceivedOK;
+ mac_stats->MulticastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts];
+ mac_stats->BroadcastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+ mac_stats->MulticastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ mac_stats->BroadcastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts];
+}
+
+static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &ctrl_stats->UnsupportedOpcodesReceived);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_etherStatsDropEvents] = 1,
+ [RTL8365MB_MIB_etherStatsCollisions] = 1,
+ [RTL8365MB_MIB_etherStatsFragments] = 1,
+ [RTL8365MB_MIB_etherStatsJabbers] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ };
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtnl_link_stats64 *stats;
+ int ret;
+ int i;
+
+ stats = &mb->ports[port].stats;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ c->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* Don't update statistics if there was an error reading the counters */
+ if (ret)
+ return;
+
+ spin_lock(&mb->ports[port].stats_lock);
+
+ stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+
+ /* if{In,Out}Octets includes FCS - remove it */
+ stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
+ stats->tx_bytes =
+ cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
+
+ stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
+ stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
+
+ stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
+ cnt[RTL8365MB_MIB_etherStatsJabbers];
+ stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
+
+ stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
+ stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
+
+ spin_unlock(&mb->ports[port].stats_lock);
+}
+
+static void rtl8365mb_stats_poll(struct work_struct *work)
+{
+ struct rtl8365mb_port *p = container_of(to_delayed_work(work),
+ struct rtl8365mb_port,
+ mib_work);
+ struct realtek_smi *smi = p->smi;
+
+ rtl8365mb_stats_update(smi, p->index);
+
+ schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
+}
+
+static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
+static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ /* Per-chip global mutex to protect MIB counter access, since doing
+ * so requires accessing a series of registers in a particular order.
+ */
+ mutex_init(&mb->mib_lock);
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Per-port spinlock to protect the stats64 data */
+ spin_lock_init(&p->stats_lock);
+
+ /* This work polls the MIB counters and keeps the stats64 data
+ * up-to-date.
+ */
+ INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
+ }
+}
+
+static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ cancel_delayed_work_sync(&p->mib_work);
+ }
+}
+
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+ u32 *val)
+{
+ int ret;
+
+ ret = regmap_read(smi->map, reg, val);
+ if (ret)
+ return ret;
+
+ return regmap_write(smi->map, reg, *val);
+}
+
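rtl8365mb_get_and_clear_status_reg() relies on the status registers being
write-1-to-clear: the read latches the pending bits, and writing the same
value back acknowledges exactly the bits that were observed, so an event
arriving between the read and the write is not lost. A distilled sketch of
the idiom on a hypothetical memory-mapped register:

#include <stdint.h>

/* W1C acknowledge: returns and clears only the bits that were pending */
static uint32_t ack_pending(volatile uint32_t *status)
{
	uint32_t pending = *status;	/* latch current events */

	*status = pending;		/* clear exactly what we saw */
	return pending;
}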
+static irqreturn_t rtl8365mb_irq(int irq, void *data)
+{
+ struct realtek_smi *smi = data;
+ unsigned long line_changes = 0;
+ struct rtl8365mb *mb;
+ u32 stat;
+ int line;
+ int ret;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ &stat);
+ if (ret)
+ goto out_error;
+
+ if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
+ u32 linkdown_ind;
+ u32 linkup_ind;
+ u32 val;
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
+
+ line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ }
+
+ if (!line_changes)
+ goto out_none;
+
+ for_each_set_bit(line, &line_changes, smi->num_ports) {
+ int child_irq = irq_find_mapping(smi->irqdomain, line);
+
+ handle_nested_irq(child_irq);
+ }
+
+ return IRQ_HANDLED;
+
+out_error:
+ dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+
+out_none:
+ return IRQ_NONE;
+}
+
+static struct irq_chip rtl8365mb_irq_chip = {
+ .name = "rtl8365mb",
+ /* The hardware doesn't support masking IRQs on a per-port basis */
+};
+
+static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
+ .map = rtl8365mb_irq_map,
+ .unmap = rtl8365mb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+{
+ return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ RTL8365MB_INTR_LINK_CHANGE_MASK,
+ FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
+ enable ? 1 : 0));
+}
+
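FIELD_PREP() and FIELD_GET() from <linux/bitfield.h>, used throughout this
driver, derive the shift amount from the mask itself, so bit positions are
never hard-coded twice. An illustration with a made-up 3-bit field:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_FIELD_MASK	GENMASK(6, 4)	/* hypothetical field, bits 6:4 */

static u32 field_helpers_example(void)
{
	u32 reg = FIELD_PREP(EXAMPLE_FIELD_MASK, 5);	/* 5 << 4 == 0x50 */

	return FIELD_GET(EXAMPLE_FIELD_MASK, reg);	/* extracts 5 again */
}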
+static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, true);
+}
+
+static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, false);
+}
+
+static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct device_node *intc;
+ u32 irq_trig;
+ int virq;
+ int irq;
+ u32 val;
+ int ret;
+ int i;
+
+ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(smi->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* rtl8365mb IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(smi->dev, "failed to get parent irq: %d\n",
+ irq);
+ ret = irq ? irq : -EINVAL;
+ goto out_put_node;
+ }
+
+ smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
+ &rtl8365mb_irqdomain_ops, smi);
+ if (!smi->irqdomain) {
+ dev_err(smi->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_create_mapping(smi->irqdomain, i);
+ if (!virq) {
+ dev_err(smi->dev,
+ "failed to create irq domain mapping\n");
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ irq_set_parent(virq, irq);
+ }
+
+ /* Configure chip interrupt signal polarity */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ val = RTL8365MB_INTR_POLARITY_HIGH;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ val = RTL8365MB_INTR_POLARITY_LOW;
+ break;
+ default:
+ dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ irq_trig);
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ RTL8365MB_INTR_POLARITY_MASK,
+ FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Disable the interrupt in case the chip has it enabled on reset */
+ ret = rtl8365mb_irq_disable(smi);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Clear the interrupt status register */
+ ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ RTL8365MB_INTR_ALL_MASK);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
+ "rtl8365mb", smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ goto out_remove_irqdomain;
+ }
+
+ /* Store the irq so that we know to free it during teardown */
+ mb->irq = irq;
+
+ ret = rtl8365mb_irq_enable(smi);
+ if (ret)
+ goto out_free_irq;
+
+ of_node_put(intc);
+
+ return 0;
+
+out_free_irq:
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+
+out_remove_irqdomain:
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+
+out_put_node:
+ of_node_put(intc);
+
+ return ret;
+}
+
+static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int virq;
+ int i;
+
+ if (mb->irq) {
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+ }
+
+ if (smi->irqdomain) {
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+ }
+}
+
+static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb_cpu *cpu = &mb->cpu;
+ u32 val;
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ RTL8365MB_CPU_PORT_MASK_MASK,
+ FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
+ cpu->mask));
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
+ cpu->trap_port >> 3);
+ ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
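One detail worth noting in rtl8365mb_cpu_config(): the trap port index is
wider than the base TRAP_PORT field, so the value is split, with the low
bits going into TRAP_PORT and the remainder (trap_port >> 3) into
TRAP_PORT_EXT. The shift by 3 implies the base field is three bits wide;
illustrative values (assumed, not taken from a datasheet):

/* trap_port = 5 (0b0101) -> TRAP_PORT = 0b101, TRAP_PORT_EXT = 0
 * trap_port = 9 (0b1001) -> TRAP_PORT = 0b001, TRAP_PORT_EXT = 1
 */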
+static int rtl8365mb_switch_init(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int ret;
+ int i;
+
+ /* Do any chip-specific init jam before getting to the common stuff */
+ if (mb->jam_table) {
+ for (i = 0; i < mb->jam_size; i++) {
+ ret = regmap_write(smi->map, mb->jam_table[i].reg,
+ mb->jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Common init jam */
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
+ ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ rtl8365mb_init_jam_common[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+{
+ u32 val;
+
+ realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
+ 1));
+
+ /* Realtek documentation says the chip needs 1 second to reset. Sleep
+ * for 100 ms before accessing any registers to prevent ACK timeouts.
+ */
+ msleep(100);
+ return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ !(val & RTL8365MB_CHIP_RESET_HW_MASK),
+ 20000, 1e6);
+}
+
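For reference, the regmap_read_poll_timeout() call above (poll roughly every
20 ms, give up after 1 s) is approximately equivalent to the open-coded loop
below. This is a simplified sketch; the real macro also performs one final
read after the deadline before declaring a timeout.

static int wait_reset_done(struct regmap *map)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1000);
	u32 val;
	int ret;

	for (;;) {
		ret = regmap_read(map, RTL8365MB_CHIP_RESET_REG, &val);
		if (ret)
			return ret;
		if (!(val & RTL8365MB_CHIP_RESET_HW_MASK))
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(5000, 20000);
	}
}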
+static int rtl8365mb_setup(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_reset_chip(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Configure switch to vendor-defined initial state */
+ ret = rtl8365mb_switch_init(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Set up cascading IRQs */
+ ret = rtl8365mb_irq_setup(smi);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ else if (ret)
+ dev_info(smi->dev, "no interrupt support\n");
+
+ /* Configure CPU tagging */
+ ret = rtl8365mb_cpu_config(smi);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Configure ports */
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Set up per-port private data */
+ p->smi = smi;
+ p->index = i;
+
+ /* Forward only to the CPU */
+ ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Disable learning */
+ ret = rtl8365mb_port_set_learning(smi, i, false);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Set the initial STP state of all ports to DISABLED; otherwise
+ * ports will still forward frames to the CPU despite being
+ * administratively down by default.
+ */
+ rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ }
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ RTL8365MB_CFG0_MAX_LEN_MASK,
+ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
+ if (ret)
+ goto out_teardown_irq;
+
+ ret = realtek_smi_setup_mdio(smi);
+ if (ret) {
+ dev_err(smi->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
+
+ /* Start statistics counter polling */
+ rtl8365mb_stats_setup(smi);
+
+ return 0;
+
+out_teardown_irq:
+ rtl8365mb_irq_teardown(smi);
+
+out_error:
+ return ret;
+}
+
+static void rtl8365mb_teardown(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ rtl8365mb_stats_teardown(smi);
+ rtl8365mb_irq_teardown(smi);
+}
+
+static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
+{
+ int ret;
+
+ /* For some reason we have to write a magic value to an arbitrary
+ * register whenever accessing the chip ID/version registers.
+ */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
+ if (ret)
+ return ret;
+
+ /* Reset magic register */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_detect(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ u32 chip_id;
+ u32 chip_ver;
+ int ret;
+
+ ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ if (ret) {
+ dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8365MB_CHIP_ID_8365MB_VC:
+ dev_info(smi->dev,
+ "found an RTL8365MB-VC switch (ver=0x%04x)\n",
+ chip_ver);
+
+ smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
+ smi->num_ports = smi->cpu_port + 1;
+
+ mb->smi = smi;
+ mb->chip_id = chip_id;
+ mb->chip_ver = chip_ver;
+ mb->port_mask = BIT(smi->num_ports) - 1;
+ mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
+ mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
+ mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
+
+ mb->cpu.enable = 1;
+ mb->cpu.mask = BIT(smi->cpu_port);
+ mb->cpu.trap_port = smi->cpu_port;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
+ break;
+ default:
+ dev_err(smi->dev,
+ "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
+ chip_id, chip_ver);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+ .detect = rtl8365mb_detect,
+ .phy_read = rtl8365mb_phy_read,
+ .phy_write = rtl8365mb_phy_write,
+};
+
+const struct realtek_smi_variant rtl8365mb_variant = {
+ .ds_ops = &rtl8365mb_switch_ops,
+ .ops = &rtl8365mb_smi_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xb9,
+ .cmd_write = 0xb8,
+ .chip_data_sz = sizeof(struct rtl8365mb),
+};
+EXPORT_SYMBOL_GPL(rtl8365mb_variant);
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 75897a369096..bdb8d8d34880 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
-int rtl8366_init_vlan(struct realtek_smi *smi)
-{
- int port;
- int ret;
-
- ret = rtl8366_reset_vlan(smi);
- if (ret)
- return ret;
-
- /* Loop over the available ports, for each port, associate
- * it with the VLAN (port+1)
- */
- for (port = 0; port < smi->num_ports; port++) {
- u32 mask;
-
- if (port == smi->cpu_port)
- /* For the CPU port, make all ports members of this
- * VLAN.
- */
- mask = GENMASK((int)smi->num_ports - 1, 0);
- else
- /* For all other ports, enable itself plus the
- * CPU port.
- */
- mask = BIT(port) | BIT(smi->cpu_port);
-
- /* For each port, set the port as member of VLAN (port+1)
- * and untagged, except for the CPU port: the CPU port (5) is
- * member of VLAN 6 and so are ALL the other ports as well.
- * Use filter 0 (no filter).
- */
- dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
- (port + 1), port, mask);
- ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
- if (ret)
- return ret;
-
- dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
- (port + 1), port, (port + 1));
- ret = rtl8366_set_pvid(smi, port, (port + 1));
- if (ret)
- return ret;
- }
-
- return rtl8366_enable_vlan(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct realtek_smi *smi = ds->priv;
- struct rtl8366_vlan_4k vlan4k;
- int ret;
-
- /* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
-
- dev_info(smi->dev, "%s filtering on port %d\n",
- vlan_filtering ? "enable" : "disable",
- port);
-
- /* TODO:
- * The hardware support filter ID (FID) 0..7, I have no clue how to
- * support this in the driver when the callback only says on/off.
- */
- ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
- if (ret)
- return ret;
-
- /* Just set the filter to FID 1 for now then */
- ret = rtl8366_set_vlan(smi, port + 1,
- vlan4k.member,
- vlan4k.untag,
- 1);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
@@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return ret;
}
- dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
- vlan->vid, port, untagged ? "untagged" : "tagged",
- pvid ? " PVID" : "no PVID");
-
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
- dev_err(smi->dev, "port is DSA or CPU port\n");
+ dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
member |= BIT(port);
@@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
struct realtek_smi *smi = ds->priv;
int ret, i;
- dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
+ dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
for (i = 0; i < smi->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
@@ -457,7 +371,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
* anymore then clear the whole member
* config so it can be reused.
*/
- if (!vlanmc.member && vlanmc.untag) {
+ if (!vlanmc.member) {
vlanmc.vid = 0;
vlanmc.priority = 0;
vlanmc.fid = 0;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a89093bc6c6a..03deacd83e61 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -42,9 +43,12 @@
/* Port Enable Control register */
#define RTL8366RB_PECR 0x0001
-/* Switch Security Control registers */
-#define RTL8366RB_SSCR0 0x0002
-#define RTL8366RB_SSCR1 0x0003
+/* Switch per-port learning disablement register */
+#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
+
+/* Security control, actually aging register */
+#define RTL8366RB_SECURITY_CTRL 0x0003
+
#define RTL8366RB_SSCR2 0x0004
#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
@@ -106,6 +110,18 @@
#define RTL8366RB_POWER_SAVING_REG 0x0021
+/* Spanning tree status (STP) control, two bits per port per FID */
+#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
+#define RTL8366RB_STP_STATE_DISABLED 0x0
+#define RTL8366RB_STP_STATE_BLOCKING 0x1
+#define RTL8366RB_STP_STATE_LEARNING 0x2
+#define RTL8366RB_STP_STATE_FORWARDING 0x3
+#define RTL8366RB_STP_MASK GENMASK(1, 0)
+#define RTL8366RB_STP_STATE(port, state) \
+ ((state) << ((port) * 2))
+#define RTL8366RB_STP_STATE_MASK(port) \
+ RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
+
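Since each per-FID register packs two state bits per port, the macros above
reduce to simple shifts. A worked example for port 3 (whose bits live at
positions 7:6):

/* RTL8366RB_STP_STATE_MASK(3)                            == 0x3 << 6 == 0x00c0
 * RTL8366RB_STP_STATE(3, RTL8366RB_STP_STATE_FORWARDING) == 0x3 << 6 == 0x00c0
 * RTL8366RB_STP_STATE(3, RTL8366RB_STP_STATE_LEARNING)   == 0x2 << 6 == 0x0080
 */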
/* CPU port control reg */
#define RTL8368RB_CPU_CTRL_REG 0x0061
#define RTL8368RB_CPU_PORTS_MSK 0x00FF
@@ -143,6 +159,21 @@
#define RTL8366RB_PHY_NO_OFFSET 9
#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
+/* VLAN Ingress Control Register 1, one bit per port.
+ * Bits 0 .. 5 will make the switch drop ingress frames without a
+ * VID, such as untagged or priority-tagged frames, for the
+ * respective port.
+ * Bits 6 .. 11 will make the switch drop ingress frames carrying
+ * a C-tag with VID != 0 for the respective port.
+ */
+#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
+#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
+
+/* VLAN Ingress Control Register 2, one bit per port.
+ * Bits 0 .. 5 will make the switch drop all ingress frames with a
+ * VLAN classification that does not include the port in its member
+ * set.
+ */
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
/* LED control registers */
@@ -215,6 +246,7 @@
#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_NUM_FIDS 8
#define RTL8366RB_FIDMAX 7
#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
@@ -300,6 +332,13 @@
#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
+/* Port isolation registers */
+#define RTL8366RB_PORT_ISO_BASE 0x0F08
+#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
+#define RTL8366RB_PORT_ISO_EN BIT(0)
+#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
+#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
+
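The isolation port mask is shifted up by one because bit 0 is the enable bit.
For example, a user port that may only egress to the CPU port (port 5 on this
switch) would be programmed with:

/* RTL8366RB_PORT_ISO_EN | RTL8366RB_PORT_ISO_PORTS(BIT(5))
 *   == BIT(0) | (BIT(5) << 1)
 *   == 0x01   |  0x40
 *   == 0x41
 */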
/* bits 0..5 enable force when cleared */
#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
@@ -314,9 +353,11 @@
/**
* struct rtl8366rb - RTL8366RB-specific data
* @max_mtu: per-port max MTU setting
+ * @pvid_enabled: whether PVID is set for the respective port
*/
struct rtl8366rb {
unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
};
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
@@ -835,6 +876,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
+ /* Isolate all user ports so they can only send packets to
+ * themselves and the CPU port
+ */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+ }
+ /* CPU port can send packets to all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
@@ -888,13 +944,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* layer 2 size, see rtl8366rb_change_mtu() */
rb->max_mtu[i] = 1532;
- /* Enable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
+ /* Disable learning for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
+ ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -911,11 +968,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Discard VLAN tagged packets if the port is not a member of
- * the VLAN with which the packets is associated.
- */
+ /* Accept all packets by default; we enable filtering on demand */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ 0);
+ if (ret)
+ return ret;
ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
- RTL8366RB_PORT_ALL);
+ 0);
if (ret)
return ret;
@@ -963,7 +1022,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_init_vlan(smi);
+ ret = rtl8366_reset_vlan(smi);
if (ret)
return ret;
@@ -977,8 +1036,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return -ENODEV;
}
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
@@ -1127,6 +1184,190 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
rb8366rb_set_port_led(smi, port, false);
}
+static int
+rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all ports other than the current one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Join this port to each other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)));
+ if (ret)
+ dev_err(smi->dev, "failed to join port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Set the bits for the ports we can access */
+ return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap));
+}
+
+static void
+rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+ /* Loop over all ports other than this one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Remove this port from any other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
+ if (ret)
+ dev_err(smi->dev, "failed to leave port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Clear the bits for the ports we cannot access, leave ourselves */
+ regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
+}
+
+/**
+ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
+ * @smi: SMI state container
+ * @port: the port to drop untagged and C-tagged frames on
+ * @drop: whether to drop or pass untagged and C-tagged frames
+ */
+static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+{
+ return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
+ drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
+}
+
+static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366rb *rb;
+ int ret;
+
+ rb = smi->chip_data;
+
+ dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ vlan_filtering ? "enable" : "disable");
+
+ /* If the port is not in the member set, the frame will be dropped */
+ ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ BIT(port), vlan_filtering ? BIT(port) : 0);
+ if (ret)
+ return ret;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. If we turn off VLAN
+ * filtering on a port, we need to accept all frames.
+ */
+ if (vlan_filtering)
+ ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ else
+ ret = rtl8366rb_drop_untagged(smi, port, false);
+
+ return ret;
+}
+
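The resulting ingress policy for untagged and C-tagged frames set up by
rtl8366rb_vlan_filtering() and rtl8366rb_drop_untagged() can be summarized
as a truth table (a sketch of the intended behavior):

/* vlan_filtering | pvid_enabled | untagged/C-tagged ingress
 * ---------------+--------------+---------------------------------
 *      off       |     any      | accepted
 *      on        |     yes      | accepted (classified to the PVID)
 *      on        |     no       | dropped
 */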
+static int
+rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ BIT(port),
+ (flags.val & BR_LEARNING) ? 0 : BIT(port));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ u32 val;
+ int i;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8366RB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8366RB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8366RB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8366RB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ /* Set the same status for the port on all the FIDs */
+ for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
+ regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ RTL8366RB_STP_STATE_MASK(port),
+ RTL8366RB_STP_STATE(port, val));
+ }
+}
+
+static void
+rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ /* This will age out any learned L2 entries */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), BIT(port));
+ /* Restore the normal state of things */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), 0);
+}
+
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct realtek_smi *smi = ds->priv;
@@ -1338,24 +1579,44 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
{
+ struct rtl8366rb *rb;
+ bool pvid_enabled;
+ int ret;
+
+ rb = smi->chip_data;
+ pvid_enabled = !!index;
+
if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
(index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+ if (ret)
+ return ret;
+
+ rb->pvid_enabled[port] = pvid_enabled;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. Make sure to update the
+ * filtering setting.
+ */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
+ ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+
+ return ret;
}
static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
{
- unsigned int max = RTL8366RB_NUM_VLANS;
+ unsigned int max = RTL8366RB_NUM_VLANS - 1;
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan > max)
+ if (vlan > max)
return false;
return true;
@@ -1510,11 +1771,17 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
- .port_vlan_filtering = rtl8366_vlan_filtering,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
};
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 5e5d24e7c02b..808419f3b808 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -20,6 +20,27 @@
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
+/* Calculated assuming 1 Gbps, where the clock is 125 MHz (8 ns period).
+ * To avoid floating point operations, we multiply the degrees by 10 to
+ * get a "phase" with 1 decimal point of precision.
+ */
+#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \
+ (((ps) * 360) / 800)
+#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \
+ ((800 * (phase)) / 360)
+#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \
+ (((phase) - 738) / 9)
+#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \
+ SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps))
+
+/* The valid range in degrees is between 73.8 and 101.7,
+ * in 0.9 degree increments
+ */
+#define SJA1105_RGMII_DELAY_MIN_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(738)
+#define SJA1105_RGMII_DELAY_MAX_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(1017)
+
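To make the conversion chain concrete, here is the arithmetic for the common
2 ns delay and for the derived limits (all integer math, "phase" in tenths of
a degree):

/* SJA1105_RGMII_DELAY_PS_TO_PHASE(2000) == 2000 * 360 / 800 == 900 (90.0 deg)
 * SJA1105_RGMII_DELAY_PHASE_TO_HW(900)  == (900 - 738) / 9   == 18
 *
 * SJA1105_RGMII_DELAY_MIN_PS == 800 * 738 / 360  == 1640
 * SJA1105_RGMII_DELAY_MAX_PS == 800 * 1017 / 360 == 2260
 */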
typedef enum {
SPI_READ = 0,
SPI_WRITE = 1,
@@ -222,16 +243,14 @@ struct sja1105_flow_block {
struct sja1105_private {
struct sja1105_static_config static_config;
- bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
- bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
+ int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];
phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
bool fixed_link[SJA1105_MAX_NUM_PORTS];
- bool vlan_aware;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
const struct sja1105_info *info;
size_t max_xfer_len;
- struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 5bbf1707f2af..e3699f76f6d7 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -498,17 +498,6 @@ sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
-/* Valid range in degrees is an integer between 73.8 and 101.7 */
-static u64 sja1105_rgmii_delay(u64 phase)
-{
- /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
- * To avoid floating point operations we'll multiply by 10
- * and get 1 decimal point precision.
- */
- phase *= 10;
- return (phase - 738) / 9;
-}
-
/* The RGMII delay setup procedure is 2-step and gets called upon each
* .phylink_mac_config. Both are strategic.
* The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
@@ -521,13 +510,15 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int rc;
- if (priv->rgmii_rx_delay[port])
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
- if (priv->rgmii_tx_delay[port])
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay)
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ if (tx_delay)
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
/* Stage 1: Turn the RGMII delay lines off. */
pad_mii_id.rxc_bypass = 1;
@@ -542,11 +533,11 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
return rc;
/* Stage 2: Turn the RGMII delay lines on. */
- if (priv->rgmii_rx_delay[port]) {
+ if (rx_delay) {
pad_mii_id.rxc_bypass = 0;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
+ if (tx_delay) {
pad_mii_id.txc_bypass = 0;
pad_mii_id.txc_pd = 0;
}
@@ -561,20 +552,22 @@ int sja1110_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
pad_mii_id.rxc_pd = 1;
pad_mii_id.txc_pd = 1;
- if (priv->rgmii_rx_delay[port]) {
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay) {
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
pad_mii_id.rxc_bypass = 1;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (tx_delay) {
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
pad_mii_id.txc_bypass = 1;
pad_mii_id.txc_pd = 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 924c3f129992..1832d4bd3440 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -27,15 +27,29 @@
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
-static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
- unsigned int startup_delay)
+/* Configure the optional reset pin and bring up the switch */
+static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
+ unsigned int startup_delay)
{
+ struct gpio_desc *gpio;
+
+ gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (!gpio)
+ return 0;
+
gpiod_set_value_cansleep(gpio, 1);
/* Wait for minimum reset pulse length */
msleep(pulse_len);
gpiod_set_value_cansleep(gpio, 0);
/* Wait until chip is ready after reset */
msleep(startup_delay);
+
+ gpiod_put(gpio);
+
+ return 0;
}
static void
@@ -1095,27 +1109,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv)
return sja1105_static_config_upload(priv);
}
-static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
+/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
+ * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
+ * properties. It has the advantage of working with fixed links and with PHYs
+ * that apply RGMII delays too, and the MAC driver needs not perform any
+ * special checks.
+ *
+ * Previously we were acting upon the "phy-mode" property when we were
+ * operating in fixed-link, basically acting as a PHY, but with a reversed
+ * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
+ * behave as if it is connected to a PHY which has applied RGMII delays in the
+ * TX direction. So if anything, RX delays should have been added by the MAC,
+ * but we were adding TX delays.
+ *
+ * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
+ * back to the legacy behavior and apply delays on fixed-link ports based on
+ * the reverse interpretation of the phy-mode. This is a deviation from the
+ * expected default behavior which is to simply apply no delays. To achieve
+ * that behavior with the new bindings, it is mandatory to specify
+ * "{rx,tx}-internal-delay-ps" with a value of 0.
+ */
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
+ struct device_node *port_dn)
{
- struct dsa_switch *ds = priv->ds;
- int port;
+ phy_interface_t phy_mode = priv->phy_mode[port];
+ struct device *dev = &priv->spidev->dev;
+ int rx_delay = -1, tx_delay = -1;
- for (port = 0; port < ds->num_ports; port++) {
- if (!priv->fixed_link[port])
- continue;
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return 0;
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_rx_delay[port] = true;
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_tx_delay[port] = true;
+ if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
+ dev_warn(dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"\n",
+ port);
- if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
- !priv->info->setup_rgmii_delay)
- return -EINVAL;
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
}
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
+ dev_err(dev, "Chip cannot apply RGMII delays\n");
+ return -EINVAL;
+ }
+
+ if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
+ (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
+ dev_err(dev,
+ "port %d RGMII delay values out of range, must be between %d and %d ps\n",
+ port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
+ return -ERANGE;
+ }
+
+ priv->rgmii_rx_delay_ps[port] = rx_delay;
+ priv->rgmii_tx_delay_ps[port] = tx_delay;
+
return 0;
}
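For quick reference, the legacy fallback above (fixed-link ports lacking the
new properties) resolves to the following delays before range validation:

/* phy-mode     rx_delay    tx_delay
 * rgmii            0           0
 * rgmii-rxid    2000 ps        0
 * rgmii-txid       0        2000 ps
 * rgmii-id      2000 ps     2000 ps
 */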
@@ -1166,6 +1231,10 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
priv->phy_mode[index] = phy_mode;
+
+ err = sja1105_parse_rgmii_delays(priv, index, child);
+ if (err)
+ return err;
}
return 0;
@@ -1766,6 +1835,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1802,7 +1872,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!priv->vlan_aware)
+ if (!dsa_port_is_vlan_filtering(dp))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -2295,11 +2365,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
tpid2 = ETH_P_SJA1105;
}
- if (priv->vlan_aware == enabled)
- return 0;
-
- priv->vlan_aware = enabled;
-
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
@@ -2332,7 +2397,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !priv->vlan_aware;
+ l2_lookup_params->shared_learn = !enabled;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
@@ -2965,7 +3030,6 @@ static int sja1105_setup_ports(struct sja1105_private *priv)
continue;
dp->priv = sp;
- sp->dp = dp;
sp->data = tagger_data;
slave = dp->slave;
kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
@@ -3229,17 +3293,14 @@ static int sja1105_probe(struct spi_device *spi)
return -EINVAL;
}
+ rc = sja1105_hw_reset(dev, 1, 1);
+ if (rc)
+ return rc;
+
priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- /* Configure the optional reset pin and bring up switch */
- priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset_gpio))
- dev_dbg(dev, "reset-gpios not defined, ignoring\n");
- else
- sja1105_hw_reset(priv->reset_gpio, 1, 1);
-
/* Populate our driver private structure (priv) based on
* the device tree node that was probed (spi)
*/
@@ -3311,15 +3372,6 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
}
- /* Error out early if internal delays are required through DT
- * and we can't apply them.
- */
- rc = sja1105_parse_rgmii_delays(priv);
- if (rc < 0) {
- dev_err(ds->dev, "RGMII delay not supported\n");
- return rc;
- }
-
if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index 6802f4057cc0..f5dca6a9b0f9 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -394,7 +394,8 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
- u16 vid = dsa_8021q_rx_vid(priv->ds, port);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ u16 vid = dsa_tag_8021q_rx_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
@@ -494,13 +495,15 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
bool append)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int rc;
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -568,6 +571,8 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
u32 num_entries, struct action_gate_entry *entries)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int ipv = -1;
int i, rc;
s32 rem;
@@ -592,11 +597,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
return -ERANGE;
}
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 87c906e744fb..846fa3af4504 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -270,7 +270,7 @@ static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr,
{
struct el3_private *lp = netdev_priv(dev);
- memcpy(dev->dev_addr, phys_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)phys_addr);
dev->base_addr = ioaddr;
dev->irq = irq;
dev->if_port = if_port;
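This hunk is the first of a long series following the same recipe, in
preparation for dev->dev_addr becoming const: rather than writing into
dev->dev_addr directly, the address is assembled in a local buffer and then
installed with eth_hw_addr_set(). A minimal sketch of the pattern, where
read_mac_byte() is a hypothetical per-driver accessor:

#include <linux/etherdevice.h>

u8 read_mac_byte(int i);	/* hypothetical hardware/EEPROM read */

static void install_mac(struct net_device *dev)
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = read_mac_byte(i);

	/* replaces the old memcpy(dev->dev_addr, ...) / direct writes */
	eth_hw_addr_set(dev, addr);
}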
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 6f0ea2facea9..1d124b0f65e7 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -567,6 +567,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
{
struct corkscrew_private *vp = netdev_priv(dev);
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i;
int irq;
@@ -619,7 +620,6 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
for (i = 0; i < 0x18; i++) {
- __be16 *phys_addr = (__be16 *) dev->dev_addr;
int timer;
outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
/* Pause for at least 162 us. for the read to take place. */
@@ -631,8 +631,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
eeprom[i] = inw(ioaddr + Wn0EepromData);
checksum ^= eeprom[i];
if (i < 3)
- phys_addr[i] = htons(eeprom[i]);
+ addr[i] = htons(eeprom[i]);
}
+ eth_hw_addr_set(dev, (u8 *)addr);
checksum = (checksum ^ (checksum >> 8)) & 0xff;
if (checksum != 0x00)
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dd4d3c48b98d..dc3b7c960611 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -305,15 +305,13 @@ static int tc574_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct el3_private *lp = netdev_priv(dev);
int ret, i, j;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
- __be16 *phys_addr;
char *cardname;
__u32 config;
u8 *buf;
size_t len;
- phys_addr = (__be16 *)dev->dev_addr;
-
dev_dbg(&link->dev, "3c574_config()\n");
link->io_lines = 16;
@@ -347,19 +345,20 @@ static int tc574_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i * 2]));
+ addr[i] = htons(le16_to_cpu(buf[i * 2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
EL3WINDOW(0);
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (addr[0] == htons(0x6060)) {
pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
if (link->prod_id[1])
cardname = link->prod_id[1];
else
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 09816e84314d..4673bc1604e7 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -237,8 +237,8 @@ static void tc589_detach(struct pcmcia_device *link)
static int tc589_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- __be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
@@ -246,7 +246,6 @@ static int tc589_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c589_config\n");
- phys_addr = (__be16 *)dev->dev_addr;
/* Is this a 3c562? */
if (link->manf_id != MANFID_3COM)
dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
@@ -285,18 +284,19 @@ static int tc589_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ addr[i] = htons(le16_to_cpu(buf[i*2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i));
+ if (addr[0] == htons(0x6060)) {
dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
/* The address and resource configuration register aren't loaded from
* the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7b0ae9efc004..ccf07667aa5e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1091,6 +1091,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
struct vortex_private *vp;
int option;
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i, step;
struct net_device *dev;
static int printed_version;
@@ -1284,7 +1285,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
- ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ addr[i] = htons(eeprom[i + 10]);
+ eth_hw_addr_set(dev, (u8 *)addr);
if (print_info)
pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index da1ae37a9d73..991ad953aa79 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -320,8 +320,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_cont(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 6c6bdd5913ec..1f8acbba5b6b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -716,7 +716,7 @@ static int ax_init_dev(struct net_device *dev)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
+ eth_hw_addr_set(dev, SA_prom);
}
#ifdef CONFIG_AX88796_93CX6
@@ -733,7 +733,7 @@ static int ax_init_dev(struct net_device *dev)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, mac_addr);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -748,16 +748,18 @@ static int ax_init_dev(struct net_device *dev)
/* load the mac-address from the device */
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+ u8 addr[ETH_ALEN];
+
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] =
- ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ eth_hw_addr_set(dev, addr);
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, ax->plat->mac_addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 3c370e686ec3..3aef959fc25b 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -187,6 +187,7 @@ static int get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* This is based on drivers/net/ethernet/8390/ne.c */
@@ -220,9 +221,11 @@ static int get_prom(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + AXNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
+
return 1;
} /* get_prom */
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 4ad8031ab669..e320cccba61a 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -374,8 +374,7 @@ static int mcf8390_init(struct net_device *dev)
if (ret)
return ret;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 9afc712f5948..0a9118b8be0c 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -500,9 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->base_addr = ioaddr;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = SA_prom[i];
- }
+ eth_hw_addr_set(dev, SA_prom);
pr_cont("%pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index d6715008e04d..6a0a2039600a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -390,7 +390,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
dev->ethtool_ops = &ne2k_pci_ethtool_ops;
NS8390_init(dev, 0);
- memcpy(dev->dev_addr, SA_prom, dev->addr_len);
+ eth_hw_addr_set(dev, SA_prom);
i = register_netdev(dev);
if (i)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 96ad72abd373..0f07fe03da98 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -278,6 +278,7 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
u_char __iomem *base, *virt;
+ u8 addr[ETH_ALEN];
int i, j;
/* Allocate a small memory window */
@@ -302,7 +303,8 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
(readb(base+2) == hw_info[i].a1) &&
(readb(base+4) == hw_info[i].a2)) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = readb(base + (j<<1));
+ addr[j] = readb(base + (j<<1));
+ eth_hw_addr_set(dev, addr);
break;
}
}
@@ -324,6 +326,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
u_char prom[32];
int i, j;
@@ -362,7 +365,8 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
}
if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = prom[j<<1];
+ addr[j] = prom[j<<1];
+ eth_hw_addr_set(dev, addr);
return (i < NR_INFO) ? hw_info+i : &default_info;
}
return NULL;
@@ -377,6 +381,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
static struct hw_info *get_dl10019(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
u_char sum;
@@ -385,7 +390,8 @@ static struct hw_info *get_dl10019(struct pcmcia_device *link)
if (sum != 0xff)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ eth_hw_addr_set(dev, addr);
i = inb(dev->base_addr + 0x1f);
return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
}
@@ -400,6 +406,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* Not much of a test, but the alternatives are messy */
@@ -413,9 +420,10 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + PCNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
return NULL;
}
@@ -430,6 +438,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
static struct hw_info *get_hwired(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
for (i = 0; i < 6; i++)
@@ -438,7 +447,8 @@ static struct hw_info *get_hwired(struct pcmcia_device *link)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = hw_addr[i];
+ addr[i] = hw_addr[i];
+ eth_hw_addr_set(dev, addr);
return &default_info;
} /* get_hwired */
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index fbbd7f22c142..bd89ca8a92df 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -104,8 +104,8 @@ STNIC_WRITE (int reg, byte val)
static int __init stnic_probe(void)
{
struct net_device *dev;
- int i, err;
struct ei_device *ei_local;
+ int err;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -119,8 +119,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = stnic_eadr[i];
+ eth_hw_addr_set(dev, stnic_eadr);
/* Set the base address to point to the NIC, not the "real" base! */
dev->base_addr = 0x1000;
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 35a500a21521..e8b4fe813a08 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -364,8 +364,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
if (i)
return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 412ae3e43ffb..4601b38f532a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,6 +33,7 @@ source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
+source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index aaa5078cd7d1..fdd8c6c17451 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
+obj-$(CONFIG_NET_VENDOR_ASIX) += asix/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index c4ecf4fcadf8..1cfdd01b4c2e 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -342,7 +342,7 @@ static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
static void owl_emac_set_hw_mac_addr(struct net_device *netdev)
{
struct owl_emac_priv *priv = netdev_priv(netdev);
- u8 *mac_addr = netdev->dev_addr;
+ const u8 *mac_addr = netdev->dev_addr;
u32 addr_high, addr_low;
addr_high = mac_addr[0] << 8 | mac_addr[1];
@@ -1173,7 +1173,7 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
owl_emac_set_hw_mac_addr(netdev);
return owl_emac_setup_frame_xmit(netdev_priv(netdev));
@@ -1385,7 +1385,7 @@ static void owl_emac_get_mac_addr(struct net_device *netdev)
struct device *dev = netdev->dev.parent;
int ret;
- ret = eth_platform_get_mac_address(dev, netdev->dev_addr);
+ ret = platform_get_ethdev_address(dev, netdev);
if (!ret && is_valid_ether_addr(netdev->dev_addr))
return;
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index e0f6cc910bd2..c6982f7caf9b 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -641,6 +641,7 @@ static int starfire_init_one(struct pci_dev *pdev,
struct netdev_private *np;
int i, irq, chip_idx = ent->driver_data;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
long ioaddr;
void __iomem *base;
int drv_flags, io_size;
@@ -696,7 +697,8 @@ static int starfire_init_one(struct pci_dev *pdev,
/* Serial EEPROM reads are hidden by the hardware. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version) /* Dump the EEPROM contents during development. */
if (debug > 4)
@@ -955,7 +957,7 @@ static int netdev_open(struct net_device *dev)
writew(0, ioaddr + PerfFilterTable + 4);
writew(0, ioaddr + PerfFilterTable + 8);
for (i = 1; i < 16; i++) {
- __be16 *eaddrs = (__be16 *)dev->dev_addr;
+ const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
@@ -1787,14 +1789,14 @@ static void set_rx_mode(struct net_device *dev)
} else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
netdev_for_each_mc_addr(ha, dev) {
eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
}
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
i = netdev_mc_count(dev) + 2;
while (i++ < 16) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
@@ -1805,7 +1807,7 @@ static void set_rx_mode(struct net_device *dev)
} else {
/* Must use a multicast hash table. */
void __iomem *filter_addr;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
@@ -1819,7 +1821,7 @@ static void set_rx_mode(struct net_device *dev)
}
/* Clear the perfect filter list, skip first two entries. */
filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
for (i = 2; i < 16; i++) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c560ad06f0be..447dc64a17e5 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1025,7 +1025,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
dev->dev_addr[4] << 8 | dev->dev_addr[5]);
@@ -1346,6 +1346,7 @@ static int greth_of_probe(struct platform_device *ofdev)
int i;
int err;
int tmp;
+ u8 addr[ETH_ALEN];
unsigned long timeout;
dev = alloc_etherdev(sizeof(struct greth_private));
@@ -1449,8 +1450,6 @@ static int greth_of_probe(struct platform_device *ofdev)
break;
}
if (i == 6) {
- u8 addr[ETH_ALEN];
-
err = of_get_mac_address(ofdev->dev.of_node, addr);
if (!err) {
for (i = 0; i < 6; i++)
@@ -1464,7 +1463,8 @@ static int greth_of_probe(struct platform_device *ofdev)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 920633161174..f4edc616388c 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3863,7 +3863,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_init_send(adapter);
et131x_hwaddr_init(adapter);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
/* Init the device with the new settings */
et131x_adapter_setup(adapter);
@@ -3966,7 +3966,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
rc = -ENOMEM;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 696517eae77f..1fc9a1cd3ef8 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1008,7 +1008,7 @@ static void slic_set_link_autoneg(struct slic_device *sdev)
static void slic_set_mac_address(struct slic_device *sdev)
{
- u8 *addr = sdev->netdev->dev_addr;
+ const u8 *addr = sdev->netdev->dev_addr;
u32 val;
val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
@@ -1660,7 +1660,7 @@ static int slic_read_eeprom(struct slic_device *sdev)
goto free_eeprom;
}
/* set mac address */
- ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
+ eth_hw_addr_set(sdev->netdev, mac[devfn]);
free_eeprom:
dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 037baea1c738..800ee022388f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -356,7 +356,7 @@ static int emac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 |
dev->dev_addr[2], db->membase + EMAC_MAC_A1_REG);
@@ -852,7 +852,7 @@ static int emac_probe(struct platform_device *pdev)
}
/* Read MAC-address from DT */
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
/* if the MAC address is invalid get a random one */
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 9dc12b13061f..732da15a3827 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -869,6 +869,7 @@ static int ace_init(struct net_device *dev)
int board_idx, ecode = 0;
short i;
unsigned char cache_size;
+ u8 addr[ETH_ALEN];
ap = netdev_priv(dev);
regs = ap->regs;
@@ -988,12 +989,13 @@ static int ace_init(struct net_device *dev)
writel(mac1, &regs->MacAddrHi);
writel(mac2, &regs->MacAddrLo);
- dev->dev_addr[0] = (mac1 >> 8) & 0xff;
- dev->dev_addr[1] = mac1 & 0xff;
- dev->dev_addr[2] = (mac2 >> 24) & 0xff;
- dev->dev_addr[3] = (mac2 >> 16) & 0xff;
- dev->dev_addr[4] = (mac2 >> 8) & 0xff;
- dev->dev_addr[5] = mac2 & 0xff;
+ addr[0] = (mac1 >> 8) & 0xff;
+ addr[1] = mac1 & 0xff;
+ addr[2] = (mac2 >> 24) & 0xff;
+ addr[3] = (mac2 >> 16) & 0xff;
+ addr[4] = (mac2 >> 8) & 0xff;
+ addr[5] = mac2 & 0xff;
+ eth_hw_addr_set(dev, addr);
printk("MAC: %pM\n", dev->dev_addr);
@@ -2712,15 +2714,15 @@ static int ace_set_mac_addr(struct net_device *dev, void *p)
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct sockaddr *addr=p;
- u8 *da;
+ const u8 *da;
struct cmd cmd;
if(netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
- da = (u8 *)dev->dev_addr;
+ da = (const u8 *)dev->dev_addr;
writel(da[0] << 8 | da[1], &regs->MacAddrHi);
writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1c00d719e5d7..d75d95a97dd9 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -849,7 +849,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
+static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
u32 lsb;
@@ -1524,7 +1524,7 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
/* get default MAC address from device tree */
- ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr);
+ ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
if (ret)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0e43000614ab..7d5d885d85d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4073,7 +4073,7 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
} else {
ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
}
/* Set offload features */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 4786f0504691..899c8a2a34b6 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -168,7 +168,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
+ depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM
depends on X86 || ARM64 || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 92e4246dc359..9421afb950f7 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1500,7 +1500,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
int i;
struct sockaddr *addr = p;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++)
@@ -1743,6 +1743,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
unsigned long reg_addr, reg_len;
struct amd8111e_priv *lp;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -1809,7 +1810,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initializing MAC address */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+ addr[i] = readb(lp->mmio + PADR + i);
+ eth_hw_addr_set(dev, addr);
/* Setting user defined parameters */
lp->ext_phy_option = speed_duplex[card_idx];
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9d2f49fd945e..9c7d9690d00c 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -582,7 +582,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
switch( lp->cardtype ) {
case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
- memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
case NEW_RIEBL:
lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
@@ -1123,7 +1123,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
return -EIO;
}
- memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ eth_hw_addr_set(dev, saddr->sa_data);
for( i = 0; i < 6; i++ )
MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 9c1636222b99..c6f003975621 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy1_search_mac0 = 1;
} else {
if (is_valid_ether_addr(pd->mac)) {
- memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac);
} else {
/* Set a random MAC since platform_data provided no valid one. */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 4019cab87505..30ee5329bd7c 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -529,7 +529,8 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
mace_init
Resets the MACE chip.
---------------------------------------------------------------------------- */
-static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
+static int mace_init(mace_private *lp, unsigned int ioaddr,
+ const char *enet_addr)
{
int i;
int ct = 0;
@@ -635,7 +636,7 @@ static int nmclan_config(struct pcmcia_device *link)
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETH_ALEN);
+ eth_hw_addr_set(dev, buf);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 70d76fdb9f56..f5c50ff377ff 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1595,6 +1595,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
struct net_device *dev;
const struct pcnet32_access *a = NULL;
u8 promaddr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
int ret = -ENODEV;
/* reset the chip */
@@ -1760,9 +1761,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
unsigned int val;
val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
/* There may be endianness issues here. */
- dev->dev_addr[2 * i] = val & 0x0ff;
- dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ addr[2 * i] = val & 0x0ff;
+ addr[2 * i + 1] = (val >> 8) & 0x0ff;
}
+ eth_hw_addr_set(dev, addr);
/* read PROM address and compare with CSR address */
for (i = 0; i < ETH_ALEN; i++)
@@ -1775,13 +1777,16 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_cont(" warning: CSR address invalid,\n");
pr_info(" using instead PROM address of");
}
- memcpy(dev->dev_addr, promaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, promaddr);
}
}
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->dev_addr))
- eth_zero_addr(dev->dev_addr);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(dev, zero_addr);
+ }
if (pcnet32_debug & NETIF_MSG_PROBE) {
pr_cont(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 4a845bc071b2..007bd7787291 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -305,7 +305,6 @@ static int __init lance_probe( struct net_device *dev)
unsigned long ioaddr;
struct lance_private *lp;
- int i;
static int did_version;
volatile unsigned short *ioaddr_probe;
unsigned short tmp1, tmp2;
@@ -373,8 +372,7 @@ static int __init lance_probe( struct net_device *dev)
dev->irq);
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* tell the card it's ether address, bytes swapped */
MEM->init.hwaddr[0] = dev->dev_addr[1];
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ddece276ae23..22d609563af8 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1301,7 +1301,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
struct device_node *dp = op->dev.of_node;
struct lance_private *lp;
struct net_device *dev;
- int i;
dev = alloc_etherdev(sizeof(struct lance_private) + 8);
if (!dev)
@@ -1315,8 +1314,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
* will copy the address in the device structure to the lance
* initialization block.
*/
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* Get the IO region */
lp->lregs = of_ioremap(&op->resource[0], 0,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index d5fd49dd25f3..3936543a74d8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1080,7 +1080,7 @@ static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
return 0;
}
-static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 17a585adfb49..30d24d19f40d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1912,10 +1912,8 @@ static int xgbe_close(struct net_device *netdev)
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
- flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);
- flush_workqueue(pdata->dev_workqueue);
destroy_workqueue(pdata->dev_workqueue);
set_bit(XGBE_DOWN, &pdata->dev_state);
@@ -2016,7 +2014,7 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, saddr->sa_data);
hw_if->set_mac_address(pdata, netdev->dev_addr);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index a218dc6f2edd..0e8698928e4d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -267,7 +267,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->mac_addr);
/* Initialize ECC timestamps */
pdata->tx_sec_period = jiffies;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 3305979a9f7c..607a2c90513b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -729,7 +729,7 @@ struct xgbe_ext_stats {
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
- int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*set_mac_address)(struct xgbe_prv_data *, const u8 *addr);
int (*config_rx_mode)(struct xgbe_prv_data *);
int (*enable_rx_csum)(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
index 2da979e4fad1..6423e22e05b2 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mac.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -65,7 +65,7 @@ void xge_mac_set_speed(struct xge_pdata *pdata)
void xge_mac_set_station_addr(struct xge_pdata *pdata)
{
- u8 *dev_addr = pdata->ndev->dev_addr;
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 80399c8980bd..d022b6db9e06 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -36,7 +36,7 @@ static int xge_get_resources(struct xge_pdata *pdata)
return -ENOMEM;
}
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 5f657879134e..e641dbbea1e2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -378,8 +378,8 @@ u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5f1fc6582d74..220dc42af31a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1731,7 +1731,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_get_port_id_acpi(dev, pdata);
#endif
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f482ced2cadd..72b5e8eb0ec7 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -165,8 +165,8 @@ static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
+ const u8 *dev_addr = p->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = p->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 304b5d43f236..86607b79c09f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -207,8 +207,8 @@ static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a989d2df59ad..9a650d1c1bdd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -308,7 +308,7 @@ bmac_init_registers(struct net_device *dev)
{
struct bmac_data *bp = netdev_priv(dev);
volatile unsigned short regValue;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
int i;
/* XXDEBUG(("bmac: enter init_registers\n")); */
@@ -371,7 +371,7 @@ bmac_init_registers(struct net_device *dev)
bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
@@ -521,19 +521,16 @@ static int bmac_resume(struct macio_dev *mdev)
static int bmac_set_address(struct net_device *dev, void *addr)
{
struct bmac_data *bp = netdev_priv(dev);
- unsigned char *p = addr;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
unsigned long flags;
- int i;
XXDEBUG(("bmac: enter set_address\n"));
spin_lock_irqsave(&bp->lock, flags);
- for (i = 0; i < 6; ++i) {
- dev->dev_addr[i] = p[i];
- }
+ eth_hw_addr_set(dev, addr);
+
/* load up the hardware address */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bed481816ea3..062a300a566a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -217,7 +217,7 @@ struct aq_hw_ops {
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
- int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_set_mac_address)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_soft_reset)(struct aq_hw_s *self);
@@ -226,7 +226,7 @@ struct aq_hw_ops {
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -373,7 +373,7 @@ struct aq_fw_ops {
int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac);
+ const u8 *mac);
int (*send_fw_request)(struct aq_hw_s *self,
const struct hw_fw_request_iface *fw_req,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 4a6dfac857ca..02058fe79f52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -35,7 +35,7 @@ static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
const struct macsec_secy *secy);
-static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
+static void aq_ether_addr_to_mac(u32 mac[2], const unsigned char *emac)
{
u32 tmp[2] = { 0 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6c049864dac0..1acf544afeb4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -300,6 +300,7 @@ static bool aq_nic_is_valid_ether_addr(const u8 *addr)
int aq_nic_ndev_register(struct aq_nic_s *self)
{
+ u8 addr[ETH_ALEN];
int err = 0;
if (!self->ndev) {
@@ -316,12 +317,13 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
#endif
mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
- self->ndev->dev_addr);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
mutex_unlock(&self->fwreq_mutex);
if (err)
goto err_exit;
+ eth_hw_addr_set(self->ndev, addr);
+
if (!is_valid_ether_addr(self->ndev->dev_addr) ||
!aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
netdev_warn(self->ndev, "MAC is invalid, will use random.");
@@ -332,7 +334,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
- ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
+ eth_hw_addr_set(self->ndev, mac_addr_permanent);
}
#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 611875ef2cd1..4625ccb79499 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -322,7 +322,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -348,7 +348,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 9f1b15077e7d..d875ce3ec759 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -533,7 +533,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -558,7 +558,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index d8db972113ec..5298846dd9f7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -58,7 +58,7 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr);
int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 404cbf60d3f2..fc0e66006644 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -944,7 +944,7 @@ u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@@ -987,7 +987,7 @@ err_exit:
}
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index ee0c22d04935..eac631c45c56 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -358,7 +358,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
-static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_wol(struct aq_hw_s *self, const u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
struct offload_info *info = NULL;
@@ -404,7 +404,7 @@ err_exit:
}
static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 92f64048bf69..c98708bb044c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -516,7 +516,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 92a79c4ffa2c..0a67612af228 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -26,7 +26,7 @@ config ARC_EMAC_CORE
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on ARC || COMPILE_TEST
help
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
@@ -36,7 +36,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR
+ depends on OF_IRQ && REGULATOR
depends on ARCH_ROCKCHIP || COMPILE_TEST
help
Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 38c288ec9059..c642c3d3e600 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -773,7 +773,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
arc_emac_set_address_internal(ndev);
@@ -941,7 +941,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
}
/* Get MAC address from device tree */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 54cdafdd067d..9acf589b1178 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -151,10 +151,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset",
GPIOD_OUT_LOW);
if (IS_ERR(data->reset_gpio)) {
- error = PTR_ERR(data->reset_gpio);
- dev_err(priv->dev, "Failed to request gpio: %d\n", error);
mdiobus_free(bus);
- return error;
+ return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio),
+ "Failed to request gpio\n");
}
of_property_read_u32(np, "phy-reset-duration", &data->msec);
@@ -166,9 +165,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
error = of_mdiobus_register(bus, priv->dev->of_node);
if (error) {
- dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
mdiobus_free(bus);
- return error;
+ return dev_err_probe(priv->dev, error,
+ "cannot register MDIO bus %s\n", bus->name);
}
return 0;
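
The two dev_err_probe() conversions above fold the log-and-return idiom into
a single statement: the helper returns err unchanged, logs via dev_err() for
real failures, and for -EPROBE_DEFER only records the deferral reason rather
than spamming the log. The resulting idiom, restated from the hunk above:

	if (IS_ERR(data->reset_gpio)) {
		mdiobus_free(bus);
		/* logs unless the error is merely -EPROBE_DEFER */
		return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio),
				     "Failed to request gpio\n");
	}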
diff --git a/drivers/net/ethernet/asix/Kconfig b/drivers/net/ethernet/asix/Kconfig
new file mode 100644
index 000000000000..eed02453314c
--- /dev/null
+++ b/drivers/net/ethernet/asix/Kconfig
@@ -0,0 +1,35 @@
+#
+# Asix network device configuration
+#
+
+config NET_VENDOR_ASIX
+ bool "Asix devices"
+ default y
+ help
+ If you have a network (Ethernet, non-USB, not NE2000 compatible)
+ interface based on a chip from ASIX, say Y.
+
+if NET_VENDOR_ASIX
+
+config SPI_AX88796C
+ tristate "Asix AX88796C-SPI support"
+ select PHYLIB
+ depends on SPI
+ depends on GPIOLIB
+ help
+ Say Y here if you intend to use an ASIX AX88796C attached in SPI mode.
+
+config SPI_AX88796C_COMPRESSION
+ bool "SPI transfer compression"
+ default n
+ depends on SPI_AX88796C
+ help
+ Say Y here to enable SPI transfer compression. It saves up
+ to 24 dummy cycles during each transfer, which may noticeably
+ speed up short transfers. This sets the default value, which
+ network interfaces inherit during probe. It can be changed at
+ run time via the spi-compression ethtool tunable.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ASIX
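
The "spi-compression ethtool tunable" mentioned in the help text above is
exposed by this driver as the "SPICompression" ethtool private flag (see
ax88796c_ioctl.c below); the Kconfig symbol only seeds the compile-time
default that each interface inherits at probe, via this line from
ax88796c_main.c:

	static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION);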
diff --git a/drivers/net/ethernet/asix/Makefile b/drivers/net/ethernet/asix/Makefile
new file mode 100644
index 000000000000..0bfbbb042634
--- /dev/null
+++ b/drivers/net/ethernet/asix/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Asix network device drivers.
+#
+
+obj-$(CONFIG_SPI_AX88796C) += ax88796c.o
+ax88796c-y := ax88796c_main.o ax88796c_ioctl.o ax88796c_spi.o
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c
new file mode 100644
index 000000000000..916ae380a004
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+static const char ax88796c_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "SPICompression",
+};
+
+static void
+ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+}
+
+static u32 ax88796c_get_msglevel(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->msg_enable;
+}
+
+static void ax88796c_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ ax_local->msg_enable = level;
+}
+
+static void
+ax88796c_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ pause->tx_pause = !!(ax_local->flowctrl & AX_FC_TX);
+ pause->rx_pause = !!(ax_local->flowctrl & AX_FC_RX);
+ pause->autoneg = (ax_local->flowctrl & AX_FC_ANEG) ?
+ AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+}
+
+static int
+ax88796c_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int fc;
+
+ /* The following logic comes from phylink_ethtool_set_pauseparam() */
+ fc = pause->tx_pause ? AX_FC_TX : 0;
+ fc |= pause->rx_pause ? AX_FC_RX : 0;
+ fc |= pause->autoneg ? AX_FC_ANEG : 0;
+
+ ax_local->flowctrl = fc;
+
+ if (pause->autoneg) {
+ phy_set_asym_pause(ax_local->phydev, pause->tx_pause,
+ pause->rx_pause);
+ } else {
+ int maccr = 0;
+
+ phy_set_asym_pause(ax_local->phydev, 0, 0);
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+ }
+
+ return 0;
+}
+
+static int ax88796c_get_regs_len(struct net_device *ndev)
+{
+ return AX88796C_REGDUMP_LEN + AX88796C_PHY_REGDUMP_LEN;
+}
+
+static void
+ax88796c_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *_p)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int offset, i;
+ u16 *p = _p;
+
+ memset(p, 0, ax88796c_get_regs_len(ndev));
+
+ mutex_lock(&ax_local->spi_lock);
+
+ for (offset = 0; offset < AX88796C_REGDUMP_LEN; offset += 2) {
+ if (!test_bit(offset / 2, ax88796c_no_regs_mask))
+ *p = AX_READ(&ax_local->ax_spi, offset);
+ p++;
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ for (i = 0; i < AX88796C_PHY_REGDUMP_LEN / 2; i++) {
+ *p = phy_read(ax_local->phydev, i);
+ p++;
+ }
+}
+
+static void
+ax88796c_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ax88796c_priv_flag_names,
+ sizeof(ax88796c_priv_flag_names));
+ break;
+ }
+}
+
+static int
+ax88796c_get_sset_count(struct net_device *ndev, int stringset)
+{
+ int ret = 0;
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(ax88796c_priv_flag_names);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ax88796c_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ if (flags & ~AX_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ if ((ax_local->priv_flags ^ flags) & AX_CAP_COMP)
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ ax_local->priv_flags = flags;
+
+ return 0;
+}
+
+static u32 ax88796c_get_priv_flags(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->priv_flags;
+}
+
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, MDIOCR_RADDR(loc)
+ | MDIOCR_FADDR(phy_id) | MDIOCR_READ, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ ((ret & MDIOCR_VALID) != 0),
+ 0, jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ if (!ret)
+ ret = AX_READ(&ax_local->ax_spi, P2_MDIODR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+int
+ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, val, P2_MDIODR);
+
+ AX_WRITE(&ax_local->ax_spi,
+ MDIOCR_RADDR(loc) | MDIOCR_FADDR(phy_id)
+ | MDIOCR_WRITE, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ ((ret & MDIOCR_VALID) != 0), 0,
+ jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+const struct ethtool_ops ax88796c_ethtool_ops = {
+ .get_drvinfo = ax88796c_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ax88796c_get_msglevel,
+ .set_msglevel = ax88796c_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_pauseparam = ax88796c_get_pauseparam,
+ .set_pauseparam = ax88796c_set_pauseparam,
+ .get_regs_len = ax88796c_get_regs_len,
+ .get_regs = ax88796c_get_regs,
+ .get_strings = ax88796c_get_strings,
+ .get_sset_count = ax88796c_get_sset_count,
+ .get_priv_flags = ax88796c_get_priv_flags,
+ .set_priv_flags = ax88796c_set_priv_flags,
+};
+
+int ax88796c_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ int ret;
+
+ ret = phy_mii_ioctl(ndev->phydev, ifr, cmd);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.h b/drivers/net/ethernet/asix/ax88796c_ioctl.h
new file mode 100644
index 000000000000..34d2a7dcc5ef
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_IOCTL_H
+#define _AX88796C_IOCTL_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+
+extern const struct ethtool_ops ax88796c_ethtool_ops;
+
+bool ax88796c_check_power(const struct ax88796c_device *ax_local);
+bool ax88796c_check_power_and_wake(struct ax88796c_device *ax_local);
+void ax88796c_set_power_saving(struct ax88796c_device *ax_local, u8 ps_level);
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc);
+int ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val);
+int ax88796c_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+#endif
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
new file mode 100644
index 000000000000..cfc597f72e3d
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -0,0 +1,1163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+#include <linux/bitmap.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/lockdep.h>
+#include <linux/mdio.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION);
+static int msg_enable = NETIF_MSG_PROBE |
+ NETIF_MSG_LINK |
+ NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR;
+
+static const char *no_regs_list = "80018001,e1918001,8001a001,fc0d0000";
+unsigned long ax88796c_no_regs_mask[AX88796C_REGDUMP_LEN / (sizeof(unsigned long) * 8)];
+
+module_param(msg_enable, int, 0444);
+MODULE_PARM_DESC(msg_enable, "Message mask (see linux/netdevice.h for bitmap)");
+
+static int ax88796c_soft_reset(struct ax88796c_device *ax_local)
+{
+ u16 temp;
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET, P0_PSR);
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET_CLR, P0_PSR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(160 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret)
+ return ret;
+
+ temp = AX_READ(&ax_local->ax_spi, P4_SPICR);
+ if (ax_local->priv_flags & AX_CAP_COMP) {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp | SPICR_RCEN | SPICR_QCEN), P4_SPICR);
+ ax_local->ax_spi.comp = 1;
+ } else {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp & ~(SPICR_RCEN | SPICR_QCEN)), P4_SPICR);
+ ax_local->ax_spi.comp = 0;
+ }
+
+ return 0;
+}
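+
+/* A note on the polling helper used throughout this driver:
+ * read_poll_timeout(op, val, cond, sleep_us, timeout_us,
+ * sleep_before_read, args...) from <linux/iopoll.h> repeatedly does
+ * val = op(args...) until cond is true or timeout_us microseconds
+ * elapse, sleeping sleep_us between reads (0 above, i.e. busy-poll).
+ * It returns 0 once cond holds and -ETIMEDOUT otherwise, so the reset
+ * above gives the device jiffies_to_usecs(160 * HZ / 1000) = ~160 ms
+ * to assert PSR_DEV_READY.
+ */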
+
+static int ax88796c_reload_eeprom(struct ax88796c_device *ax_local)
+{
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, EECR_RELOAD, P3_EECR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(2 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret) {
+ dev_err(&ax_local->spi->dev,
+ "timeout waiting for reload eeprom\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ax88796c_set_hw_multicast(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int mc_count = netdev_mc_count(ndev);
+ u16 rx_ctl = RXCR_AB;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ memset(ax_local->multi_filter, 0, AX_MCAST_FILTER_SIZE);
+
+ if (ndev->flags & IFF_PROMISC) {
+ rx_ctl |= RXCR_PRO;
+
+ } else if (ndev->flags & IFF_ALLMULTI || mc_count > AX_MAX_MCAST) {
+ rx_ctl |= RXCR_AMALL;
+
+ } else if (mc_count == 0) {
+ /* just broadcast and directed */
+ } else {
+ u32 crc_bits;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr);
+ ax_local->multi_filter[crc_bits >> 29] |=
+ (1 << ((crc_bits >> 26) & 7));
+ }
+
+ for (i = 0; i < 4; i++) {
+ AX_WRITE(&ax_local->ax_spi,
+ ((ax_local->multi_filter[i * 2 + 1] << 8) |
+ ax_local->multi_filter[i * 2]), P3_MFAR(i));
+ }
+ }
+
+ AX_WRITE(&ax_local->ax_spi, rx_ctl, P2_RXCR);
+}
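+
+/* The loop above implements a 64-bit multicast hash filter: for each
+ * address, ether_crc() yields a 32-bit CRC whose top three bits
+ * (crc_bits >> 29) select one of the eight filter bytes and whose next
+ * three bits ((crc_bits >> 26) & 7) select the bit within that byte.
+ * Worked example with a hypothetical CRC of 0xd4000000: byte index
+ * 0b110 = 6 and bit index 0b101 = 5, so bit 5 of multi_filter[6] is
+ * set and lands in the low byte of the P3_MFAR(3) write. */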
+
+static void ax88796c_set_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[4] << 8) |
+ (u16)ndev->dev_addr[5]), P3_MACASR0);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[2] << 8) |
+ (u16)ndev->dev_addr[3]), P3_MACASR1);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[0] << 8) |
+ (u16)ndev->dev_addr[1]), P3_MACASR2);
+}
+
+static void ax88796c_load_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u16 temp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* Try the device tree first */
+ if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from device tree\n");
+ return;
+ }
+
+ /* Read the MAC address from AX88796C */
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
+ ndev->dev_addr[5] = (u8)temp;
+ ndev->dev_addr[4] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
+ ndev->dev_addr[3] = (u8)temp;
+ ndev->dev_addr[2] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
+ ndev->dev_addr[1] = (u8)temp;
+ ndev->dev_addr[0] = (u8)(temp >> 8);
+
+ if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from ASIX chip\n");
+ return;
+ }
+
+ /* Use random address if none found */
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev, "Use random MAC address\n");
+ eth_hw_addr_random(ndev);
+}
+
+static void ax88796c_proc_tx_hdr(struct tx_pkt_info *info, u8 ip_summed)
+{
+ u16 pkt_len_bar = (~info->pkt_len & TX_HDR_SOP_PKTLENBAR);
+
+ /* Prepare SOP header */
+ info->sop.flags_len = info->pkt_len |
+ ((ip_summed == CHECKSUM_NONE) ||
+ (ip_summed == CHECKSUM_UNNECESSARY) ? TX_HDR_SOP_DICF : 0);
+
+ info->sop.seq_lenbar = ((info->seq_num << 11) & TX_HDR_SOP_SEQNUM)
+ | pkt_len_bar;
+ cpu_to_be16s(&info->sop.flags_len);
+ cpu_to_be16s(&info->sop.seq_lenbar);
+
+ /* Prepare Segment header */
+ info->seg.flags_seqnum_seglen = TX_HDR_SEG_FS | TX_HDR_SEG_LS
+ | info->pkt_len;
+
+ info->seg.eo_so_seglenbar = pkt_len_bar;
+
+ cpu_to_be16s(&info->seg.flags_seqnum_seglen);
+ cpu_to_be16s(&info->seg.eo_so_seglenbar);
+
+ /* Prepare EOP header */
+ info->eop.seq_len = ((info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUM) | info->pkt_len;
+ info->eop.seqbar_lenbar = ((~info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUMBAR) | pkt_len_bar;
+
+ cpu_to_be16s(&info->eop.seq_len);
+ cpu_to_be16s(&info->eop.seqbar_lenbar);
+}
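+
+/* Each header word above pairs a length (or sequence number) field
+ * with its bitwise complement so the receiver can validate headers by
+ * checking that the halves are complementary; the RX path below checks
+ * (flags_len & RX_HDR1_PKT_LEN) == (~seq_lenbar & 0x7FF). Worked
+ * example, assuming TX_HDR_SOP_PKTLENBAR is the 11-bit length mask:
+ * pkt_len = 0x064 gives pkt_len_bar = ~0x064 & 0x7ff = 0x79b. */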
+
+static int
+ax88796c_check_free_pages(struct ax88796c_device *ax_local, u8 need_pages)
+{
+ u8 free_pages;
+ u16 tmp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ free_pages = AX_READ(&ax_local->ax_spi, P0_TFBFCR) & TX_FREEBUF_MASK;
+ if (free_pages < need_pages) {
+ /* schedule free page interrupt */
+ tmp = AX_READ(&ax_local->ax_spi, P0_TFBFCR)
+ & TFBFCR_SCHE_FREE_PAGE;
+ AX_WRITE(&ax_local->ax_spi, tmp | TFBFCR_TX_PAGE_SET |
+ TFBFCR_SET_FREE_PAGE(need_pages),
+ P0_TFBFCR);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 spi_len = ax_local->ax_spi.comp ? 1 : 4;
+ struct sk_buff *skb;
+ struct tx_pkt_info info;
+ struct skb_data *entry;
+ u16 pkt_len;
+ u8 padlen, seq_num;
+ u8 need_pages;
+ int headroom;
+ int tailroom;
+
+ if (skb_queue_empty(q))
+ return NULL;
+
+ skb = skb_peek(q);
+ pkt_len = skb->len;
+ need_pages = (pkt_len + TX_OVERHEAD + 127) >> 7;
+ if (ax88796c_check_free_pages(ax_local, need_pages) != 0)
+ return NULL;
+
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
+ padlen = round_up(pkt_len, 4) - pkt_len;
+ seq_num = ++ax_local->seq_num & 0x1F;
+
+ info.pkt_len = pkt_len;
+
+ if (skb_cloned(skb) ||
+ (headroom < (TX_OVERHEAD + spi_len)) ||
+ (tailroom < (padlen + TX_EOP_SIZE))) {
+ size_t h = max((TX_OVERHEAD + spi_len) - headroom, 0);
+ size_t t = max((padlen + TX_EOP_SIZE) - tailroom, 0);
+
+ if (pskb_expand_head(skb, h, t, GFP_KERNEL))
+ return NULL;
+ }
+
+ info.seq_num = seq_num;
+ ax88796c_proc_tx_hdr(&info, skb->ip_summed);
+
+ /* SOP and SEG header */
+ memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);
+
+ /* Write SPI TXQ header */
+ memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);
+
+ /* Make 32-bit alignment */
+ skb_put(skb, padlen);
+
+ /* EOP header */
+ memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+
+ skb_unlink(skb, q);
+
+ entry = (struct skb_data *)skb->cb;
+ memset(entry, 0, sizeof(*entry));
+ entry->len = pkt_len;
+
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+
+ netdev_info(ndev, "TX packet len %d, total len %d, seq %d\n",
+ pkt_len, skb->len, seq_num);
+
+ netdev_info(ndev, " SPI Header:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 4, 0);
+
+ netdev_info(ndev, " TX SOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4, TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX packet:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4 + TX_OVERHEAD,
+ skb->len - TX_EOP_SIZE - 4 - TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX EOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + skb->len - 4, 4, 0);
+ }
+
+ return skb;
+}
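+
+/* Frame layout produced above, front to back: [SPI TXQ command,
+ * spi_len bytes][SOP + segment headers, TX_OVERHEAD bytes][payload,
+ * pkt_len bytes][padding to a 4-byte boundary][EOP, TX_EOP_SIZE
+ * bytes]. pskb_expand_head() is taken only when the skb is cloned or
+ * lacks the needed head/tail room, so the common case inserts the
+ * headers without copying the payload. */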
+
+static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
+{
+ struct ax88796c_pcpu_stats *stats;
+ struct sk_buff *tx_skb;
+ struct skb_data *entry;
+ unsigned long flags;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ stats = this_cpu_ptr(ax_local->stats);
+ tx_skb = ax88796c_tx_fixup(ax_local->ndev, &ax_local->tx_wait_q);
+
+ if (!tx_skb) {
+ this_cpu_inc(ax_local->stats->tx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)tx_skb->cb;
+
+ AX_WRITE(&ax_local->ax_spi,
+ (TSNR_TXB_START | TSNR_PKT_CNT(1)), P0_TSNR);
+
+ axspi_write_txq(&ax_local->ax_spi, tx_skb->data, tx_skb->len);
+
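+ /* After the frame has been clocked out, the TX bridge must be
+ * idle again with no TX error latched; otherwise drop the packet
+ * and reinitialize the bridge.
+ */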
+ if (((AX_READ(&ax_local->ax_spi, P0_TSNR) & TXNR_TXB_IDLE) == 0) ||
+ ((ISR_TXERR & AX_READ(&ax_local->ax_spi, P0_ISR)) != 0)) {
+ /* Ack tx error int */
+ AX_WRITE(&ax_local->ax_spi, ISR_TXERR, P0_ISR);
+
+ this_cpu_inc(ax_local->stats->tx_dropped);
+
+ if (net_ratelimit())
+ netif_err(ax_local, tx_err, ax_local->ndev,
+ "TX FIFO error, re-initialize the TX bridge\n");
+
+ /* Reinitial tx bridge */
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT |
+ AX_READ(&ax_local->ax_spi, P0_TSNR), P0_TSNR);
+ ax_local->seq_num = 0;
+ } else {
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, entry->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
+
+ entry->state = tx_done;
+ dev_kfree_skb(tx_skb);
+
+ return 1;
+}
+
+static int
+ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ skb_queue_tail(&ax_local->tx_wait_q, skb);
+ if (skb_queue_len(&ax_local->tx_wait_q) > TX_QUEUE_HIGH_WATER)
+ netif_stop_queue(ndev);
+
+ set_bit(EVENT_TX, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void
+ax88796c_skb_return(struct ax88796c_device *ax_local,
+ struct sk_buff *skb, struct rx_header *rxhdr)
+{
+ struct net_device *ndev = ax_local->ndev;
+ struct ax88796c_pcpu_stats *stats;
+ unsigned long flags;
+ int status;
+
+ stats = this_cpu_ptr(ax_local->stats);
+
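+ /* Mark hardware-verified checksums; bail out early if RX
+ * checksum offload is disabled or the chip flagged an L3/L4
+ * error.
+ */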
+ do {
+ if (!(ndev->features & NETIF_F_RXCSUM))
+ break;
+
+ /* checksum error bit is set */
+ if ((rxhdr->flags & RX_HDR3_L3_ERR) ||
+ (rxhdr->flags & RX_HDR3_L4_ERR))
+ break;
+
+ /* Other types may be indicated by more than one bit. */
+ if ((rxhdr->flags & RX_HDR3_L4_TYPE_TCP) ||
+ (rxhdr->flags & RX_HDR3_L4_TYPE_UDP))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } while (0);
+
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, skb->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ax_local->ndev);
+
+ netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+
+ status = netif_rx_ni(skb);
+ if (status != NET_RX_SUCCESS && net_ratelimit())
+ netif_info(ax_local, rx_err, ndev,
+ "netif_rx status %d\n", status);
+}
+
+static void
+ax88796c_rx_fixup(struct ax88796c_device *ax_local, struct sk_buff *rx_skb)
+{
+ struct rx_header *rxhdr = (struct rx_header *)rx_skb->data;
+ struct net_device *ndev = ax_local->ndev;
+ u16 len;
+
+ be16_to_cpus(&rxhdr->flags_len);
+ be16_to_cpus(&rxhdr->seq_lenbar);
+ be16_to_cpus(&rxhdr->flags);
+
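+ /* The second header word carries the bit-inverted packet length;
+ * a mismatch with the first word means the header is corrupt.
+ */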
+ if ((rxhdr->flags_len & RX_HDR1_PKT_LEN) !=
+ (~rxhdr->seq_lenbar & 0x7FF)) {
+ netif_err(ax_local, rx_err, ndev, "Header error\n");
+
+ this_cpu_inc(ax_local->stats->rx_frame_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ if ((rxhdr->flags_len & RX_HDR1_MII_ERR) ||
+ (rxhdr->flags_len & RX_HDR1_CRC_ERR)) {
+ netif_err(ax_local, rx_err, ndev, "CRC or MII error\n");
+
+ this_cpu_inc(ax_local->stats->rx_crc_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ len = rxhdr->flags_len & RX_HDR1_PKT_LEN;
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+ netdev_info(ndev, "RX data, total len %d, packet len %d\n",
+ rx_skb->len, len);
+
+ netdev_info(ndev, " Dump RX packet header:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data, sizeof(*rxhdr), 0);
+
+ netdev_info(ndev, " Dump RX packet:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data + sizeof(*rxhdr), len, 0);
+ }
+
+ skb_pull(rx_skb, sizeof(*rxhdr));
+ pskb_trim(rx_skb, len);
+
+ ax88796c_skb_return(ax_local, rx_skb, rxhdr);
+}
+
+static int ax88796c_receive(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct skb_data *entry;
+ u16 w_count, pkt_len;
+ struct sk_buff *skb;
+ u8 pkt_cnt;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* check rx packet and total word count */
+ AX_WRITE(&ax_local->ax_spi, AX_READ(&ax_local->ax_spi, P0_RTWCR)
+ | RTWCR_RX_LATCH, P0_RTWCR);
+
+ pkt_cnt = AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_PKT_MASK;
+ if (!pkt_cnt)
+ return 0;
+
+ pkt_len = AX_READ(&ax_local->ax_spi, P0_RCPHR) & 0x7FF;
+
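+ /* Read the 6-byte RX header plus the packet, rounded up to
+ * 32 bits and expressed in 16-bit words.
+ */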
+ w_count = round_up(pkt_len + 6, 4) >> 1;
+
+ skb = netdev_alloc_skb(ndev, w_count * 2);
+ if (!skb) {
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_DISCARD, P0_RXBCR1);
+ this_cpu_inc(ax_local->stats->rx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)skb->cb;
+
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_START | w_count, P0_RXBCR1);
+
+ axspi_read_rxq(&ax_local->ax_spi,
+ skb_put(skb, w_count * 2), skb->len);
+
+ /* Check if rx bridge is idle */
+ if ((AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_RXB_IDLE) == 0) {
+ if (net_ratelimit())
+ netif_err(ax_local, rx_err, ndev,
+ "Rx Bridge is not idle\n");
+ AX_WRITE(&ax_local->ax_spi, RXBCR2_RXB_REINIT, P0_RXBCR2);
+
+ entry->state = rx_err;
+ } else {
+ entry->state = rx_done;
+ }
+
+ AX_WRITE(&ax_local->ax_spi, ISR_RXPKT, P0_ISR);
+
+ ax88796c_rx_fixup(ax_local, skb);
+
+ return 1;
+}
+
+static int ax88796c_process_isr(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+ int todo = 0;
+ u16 isr;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ isr = AX_READ(&ax_local->ax_spi, P0_ISR);
+ AX_WRITE(&ax_local->ax_spi, isr, P0_ISR);
+
+ netif_dbg(ax_local, intr, ndev, " ISR 0x%04x\n", isr);
+
+ if (isr & ISR_TXERR) {
+ netif_dbg(ax_local, intr, ndev, " TXERR interrupt\n");
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT, P0_TSNR);
+ ax_local->seq_num = 0x1f;
+ }
+
+ if (isr & ISR_TXPAGES) {
+ netif_dbg(ax_local, intr, ndev, " TXPAGES interrupt\n");
+ set_bit(EVENT_TX, &ax_local->flags);
+ }
+
+ if (isr & ISR_LINK) {
+ netif_dbg(ax_local, intr, ndev, " Link change interrupt\n");
+ phy_mac_interrupt(ax_local->ndev->phydev);
+ }
+
+ if (isr & ISR_RXPKT) {
+ netif_dbg(ax_local, intr, ndev, " RX interrupt\n");
+ todo = ax88796c_receive(ax_local->ndev);
+ }
+
+ return todo;
+}
+
+static irqreturn_t ax88796c_interrupt(int irq, void *dev_instance)
+{
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+
+ ndev = dev_instance;
+ if (!ndev) {
+ pr_err("irq %d for unknown device.\n", irq);
+ return IRQ_NONE;
+ }
+ ax_local = to_ax88796c_device(ndev);
+
+ disable_irq_nosync(irq);
+
+ netif_dbg(ax_local, intr, ndev, "Interrupt occurred\n");
+
+ set_bit(EVENT_INTR, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return IRQ_HANDLED;
+}
+
+static void ax88796c_work(struct work_struct *work)
+{
+ struct ax88796c_device *ax_local =
+ container_of(work, struct ax88796c_device, ax_work);
+
+ mutex_lock(&ax_local->spi_lock);
+
+ if (test_bit(EVENT_SET_MULTI, &ax_local->flags)) {
+ ax88796c_set_hw_multicast(ax_local->ndev);
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ }
+
+ if (test_bit(EVENT_INTR, &ax_local->flags)) {
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+
+ while (ax88796c_process_isr(ax_local))
+ /* nothing */;
+
+ clear_bit(EVENT_INTR, &ax_local->flags);
+
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ enable_irq(ax_local->ndev->irq);
+ }
+
+ if (test_bit(EVENT_TX, &ax_local->flags)) {
+ while (skb_queue_len(&ax_local->tx_wait_q)) {
+ if (!ax88796c_hard_xmit(ax_local))
+ break;
+ }
+
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ if (netif_queue_stopped(ax_local->ndev) &&
+ (skb_queue_len(&ax_local->tx_wait_q) < TX_QUEUE_LOW_WATER))
+ netif_wake_queue(ax_local->ndev);
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u32 rx_frame_errors = 0, rx_crc_errors = 0;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct ax88796c_pcpu_stats *s;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+
+ s = per_cpu_ptr(ax_local->stats, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&s->syncp);
+ rx_packets = u64_stats_read(&s->rx_packets);
+ rx_bytes = u64_stats_read(&s->rx_bytes);
+ tx_packets = u64_stats_read(&s->tx_packets);
+ tx_bytes = u64_stats_read(&s->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+
+ rx_dropped += s->rx_dropped;
+ tx_dropped += s->tx_dropped;
+ rx_frame_errors += s->rx_frame_errors;
+ rx_crc_errors += s->rx_crc_errors;
+ }
+
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ stats->rx_frame_errors = rx_frame_errors;
+ stats->rx_crc_errors = rx_crc_errors;
+}
+
+static void ax88796c_set_mac(struct ax88796c_device *ax_local)
+{
+ u16 maccr;
+
+ maccr = (ax_local->link) ? MACCR_RXEN : 0;
+
+ switch (ax_local->speed) {
+ case SPEED_100:
+ maccr |= MACCR_SPEED_100;
+ break;
+ case SPEED_10:
+ case SPEED_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
+ switch (ax_local->duplex) {
+ case DUPLEX_FULL:
+ maccr |= MACCR_DUPLEX_FULL;
+ break;
+ case DUPLEX_HALF:
+ case DUPLEX_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
+ if (ax_local->flowctrl & AX_FC_ANEG &&
+ ax_local->phydev->autoneg) {
+ maccr |= ax_local->pause ? MACCR_RXFC_ENABLE : 0;
+ maccr |= !ax_local->pause != !ax_local->asym_pause ?
+ MACCR_TXFC_ENABLE : 0;
+ } else {
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
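+ /* Merge the new speed, duplex and flow-control settings with the
+ * untouched remainder of the MAC control register.
+ */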
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_DUPLEX_FULL | MACCR_SPEED_100 |
+ MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_handle_link_change(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool update = false;
+
+ if (phydev->link && (ax_local->speed != phydev->speed ||
+ ax_local->duplex != phydev->duplex ||
+ ax_local->pause != phydev->pause ||
+ ax_local->asym_pause != phydev->asym_pause)) {
+ ax_local->speed = phydev->speed;
+ ax_local->duplex = phydev->duplex;
+ ax_local->pause = phydev->pause;
+ ax_local->asym_pause = phydev->asym_pause;
+ update = true;
+ }
+
+ if (phydev->link != ax_local->link) {
+ if (!phydev->link) {
+ ax_local->speed = SPEED_UNKNOWN;
+ ax_local->duplex = DUPLEX_UNKNOWN;
+ }
+
+ ax_local->link = phydev->link;
+ update = true;
+ }
+
+ if (update)
+ ax88796c_set_mac(ax_local);
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static void ax88796c_set_csums(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ if (ndev->features & NETIF_F_RXCSUM) {
+ AX_WRITE(&ax_local->ax_spi, COERCR0_DEFAULT, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, COERCR1_DEFAULT, P4_COERCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR1);
+ }
+
+ if (ndev->features & NETIF_F_HW_CSUM) {
+ AX_WRITE(&ax_local->ax_spi, COETCR0_DEFAULT, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, COETCR1_TXPPPE, P4_COETCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR1);
+ }
+}
+
+static int
+ax88796c_open(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ unsigned long irq_flag = 0;
+ int fc = AX_FC_NONE;
+ int ret;
+ u16 t;
+
+ ret = request_irq(ndev->irq, ax88796c_interrupt,
+ irq_flag, ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to get IRQ %d (errno=%d).\n",
+ ndev->irq, ret);
+ return ret;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ free_irq(ndev->irq, ndev);
+ mutex_unlock(&ax_local->spi_lock);
+ return ret;
+ }
+ ax_local->seq_num = 0x1f;
+
+ ax88796c_set_mac_addr(ndev);
+ ax88796c_set_csums(ax_local);
+
+ /* Disable stuffing packet */
+ t = AX_READ(&ax_local->ax_spi, P1_RXBSPCR);
+ t &= ~RXBSPCR_STUF_ENABLE;
+ AX_WRITE(&ax_local->ax_spi, t, P1_RXBSPCR);
+
+ /* Enable RX packet process */
+ AX_WRITE(&ax_local->ax_spi, RPPER_RXEN, P1_RPPER);
+
+ t = AX_READ(&ax_local->ax_spi, P0_FER);
+ t |= FER_RXEN | FER_TXEN | FER_BSWAP | FER_IRQ_PULL;
+ AX_WRITE(&ax_local->ax_spi, t, P0_FER);
+
+ /* Setup LED mode */
+ AX_WRITE(&ax_local->ax_spi,
+ (LCR_LED0_EN | LCR_LED0_DUPLEX | LCR_LED1_EN |
+ LCR_LED1_100MODE), P2_LCR0);
+ AX_WRITE(&ax_local->ax_spi,
+ (AX_READ(&ax_local->ax_spi, P2_LCR1) & LCR_LED2_MASK) |
+ LCR_LED2_EN | LCR_LED2_LINK, P2_LCR1);
+
+ /* Disable PHY auto-polling */
+ AX_WRITE(&ax_local->ax_spi, PCR_PHYID(AX88796C_PHY_ID), P2_PCR);
+
+ /* Enable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ /* Setup flow-control configuration */
+ phy_support_asym_pause(ax_local->phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising))
+ fc |= AX_FC_ANEG;
+
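+ /* An advertised Pause bit enables RX flow control; differing
+ * Pause and Asym Pause bits enable TX flow control.
+ */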
+ fc |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ? AX_FC_RX : 0;
+ fc |= (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) !=
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising)) ? AX_FC_TX : 0;
+ ax_local->flowctrl = fc;
+
+ phy_start(ax_local->ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ spi_message_init(&ax_local->ax_spi.rx_msg);
+
+ return 0;
+}
+
+static int
+ax88796c_close(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ phy_stop(ndev->phydev);
+
+ /* We lock the mutex early not only to protect the device
+ * against concurrent access, but also to avoid waking up the
+ * queue in ax88796c_work(). phy_stop() must be called first,
+ * because it takes the same mutex to access the SPI bus.
+ */
+ mutex_lock(&ax_local->spi_lock);
+
+ netif_stop_queue(ndev);
+
+ /* No more work can be scheduled now. Turn any pending work,
+ * including work already waiting for the mutex, into a no-op.
+ */
+ netif_dbg(ax_local, ifdown, ndev, "clearing bits\n");
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ clear_bit(EVENT_INTR, &ax_local->flags);
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ /* Disable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+ __skb_queue_purge(&ax_local->tx_wait_q);
+ ax88796c_soft_reset(ax_local);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ cancel_work_sync(&ax_local->ax_work);
+
+ free_irq(ndev->irq, ndev);
+
+ return 0;
+}
+
+static int
+ax88796c_set_features(struct net_device *ndev, netdev_features_t features)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ netdev_features_t changed = features ^ ndev->features;
+
+ if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM)))
+ return 0;
+
+ ndev->features = features;
+
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM))
+ ax88796c_set_csums(ax_local);
+
+ return 0;
+}
+
+static const struct net_device_ops ax88796c_netdev_ops = {
+ .ndo_open = ax88796c_open,
+ .ndo_stop = ax88796c_close,
+ .ndo_start_xmit = ax88796c_start_xmit,
+ .ndo_get_stats64 = ax88796c_get_stats64,
+ .ndo_do_ioctl = ax88796c_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = ax88796c_set_features,
+};
+
+static int ax88796c_hard_reset(struct ax88796c_device *ax_local)
+{
+ struct device *dev = &ax_local->spi->dev;
+ struct gpio_desc *reset_gpio;
+
+ /* reset info */
+ reset_gpio = gpiod_get(dev, "reset", 0);
+ if (IS_ERR(reset_gpio)) {
+ dev_err(dev, "Could not get 'reset' GPIO: %ld", PTR_ERR(reset_gpio));
+ return PTR_ERR(reset_gpio);
+ }
+
+ /* set reset */
+ gpiod_direction_output(reset_gpio, 1);
+ msleep(100);
+ gpiod_direction_output(reset_gpio, 0);
+ gpiod_put(reset_gpio);
+ msleep(20);
+
+ return 0;
+}
+
+static int ax88796c_probe(struct spi_device *spi)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+ u16 temp;
+ int ret;
+
+ ndev = devm_alloc_etherdev(&spi->dev, sizeof(*ax_local));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ ax_local = to_ax88796c_device(ndev);
+
+ dev_set_drvdata(&spi->dev, ax_local);
+ ax_local->spi = spi;
+ ax_local->ax_spi.spi = spi;
+
+ ax_local->stats =
+ devm_netdev_alloc_pcpu_stats(&spi->dev,
+ struct ax88796c_pcpu_stats);
+ if (!ax_local->stats)
+ return -ENOMEM;
+
+ ax_local->ndev = ndev;
+ ax_local->priv_flags |= comp ? AX_CAP_COMP : 0;
+ ax_local->msg_enable = msg_enable;
+ mutex_init(&ax_local->spi_lock);
+
+ ax_local->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!ax_local->mdiobus)
+ return -ENOMEM;
+
+ ax_local->mdiobus->priv = ax_local;
+ ax_local->mdiobus->read = ax88796c_mdio_read;
+ ax_local->mdiobus->write = ax88796c_mdio_write;
+ ax_local->mdiobus->name = "ax88976c-mdiobus";
+ ax_local->mdiobus->phy_mask = (u32)~BIT(AX88796C_PHY_ID);
+ ax_local->mdiobus->parent = &spi->dev;
+
+ snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
+ "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+ return ret;
+ }
+
+ if (netif_msg_probe(ax_local)) {
+ dev_info(&spi->dev, "AX88796C-SPI Configuration:\n");
+ dev_info(&spi->dev, " Compression : %s\n",
+ ax_local->priv_flags & AX_CAP_COMP ? "ON" : "OFF");
+ }
+
+ ndev->irq = spi->irq;
+ ndev->netdev_ops = &ax88796c_netdev_ops;
+ ndev->ethtool_ops = &ax88796c_ethtool_ops;
+ ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->needed_headroom = TX_OVERHEAD;
+ ndev->needed_tailroom = TX_EOP_SIZE;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ /* ax88796c gpio reset */
+ ax88796c_hard_reset(ax_local);
+
+ /* Reset AX88796C */
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+ /* Check board revision */
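+ /* The low nibble of the chip revision register reads zero on a
+ * functional AX88796C; any other value indicates a failed SPI
+ * read.
+ */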
+ temp = AX_READ(&ax_local->ax_spi, P2_CRIR);
+ if ((temp & 0xF) != 0x0) {
+ dev_err(&spi->dev, "spi read failed: %d\n", temp);
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+
+ /*Reload EEPROM*/
+ ax88796c_reload_eeprom(ax_local);
+
+ ax88796c_load_mac_addr(ndev);
+
+ if (netif_msg_probe(ax_local))
+ dev_info(&spi->dev, "irq %d, MAC addr %pM\n",
+ ndev->irq, ndev->dev_addr);
+
+ /* Disable power saving */
+ AX_WRITE(&ax_local->ax_spi, (AX_READ(&ax_local->ax_spi, P0_PSCR)
+ & PSCR_PS_MASK) | PSCR_PS_D0, P0_PSCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ INIT_WORK(&ax_local->ax_work, ax88796c_work);
+
+ skb_queue_head_init(&ax_local->tx_wait_q);
+
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ ax_local->mdiobus->id, AX88796C_PHY_ID);
+ ax_local->phydev = phy_connect(ax_local->ndev, phy_id,
+ ax88796c_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(ax_local->phydev)) {
+ ret = PTR_ERR(ax_local->phydev);
+ goto err;
+ }
+ ax_local->phydev->irq = PHY_POLL;
+
+ ret = devm_register_netdev(&spi->dev, ndev);
+ if (ret) {
+ dev_err(&spi->dev, "failed to register a network device\n");
+ goto err_phy_dis;
+ }
+
+ netif_info(ax_local, probe, ndev, "%s %s registered\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+ phy_attached_info(ax_local->phydev);
+
+ return 0;
+
+err_phy_dis:
+ phy_disconnect(ax_local->phydev);
+err:
+ return ret;
+}
+
+static int ax88796c_remove(struct spi_device *spi)
+{
+ struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
+ struct net_device *ndev = ax_local->ndev;
+
+ phy_disconnect(ndev->phydev);
+
+ netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+
+ return 0;
+}
+
+static const struct of_device_id ax88796c_dt_ids[] = {
+ { .compatible = "asix,ax88796c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
+
+static const struct spi_device_id asix_id[] = {
+ { "ax88796c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, asix_id);
+
+static struct spi_driver ax88796c_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ax88796c_dt_ids),
+ },
+ .probe = ax88796c_probe,
+ .remove = ax88796c_remove,
+ .id_table = asix_id,
+};
+
+static __init int ax88796c_spi_init(void)
+{
+ int ret;
+
+ bitmap_zero(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ ret = bitmap_parse(no_regs_list, 35,
+ ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ if (ret) {
+ bitmap_fill(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ pr_err("Invalid bitmap description, masking all registers\n");
+ }
+
+ return spi_register_driver(&ax88796c_spi_driver);
+}
+
+static __exit void ax88796c_spi_exit(void)
+{
+ spi_unregister_driver(&ax88796c_spi_driver);
+}
+
+module_init(ax88796c_spi_init);
+module_exit(ax88796c_spi_exit);
+
+MODULE_AUTHOR("Łukasz Stelmach <l.stelmach@samsung.com>");
+MODULE_DESCRIPTION("ASIX AX88796C SPI Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
new file mode 100644
index 000000000000..80263c3cef75
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -0,0 +1,568 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_MAIN_H
+#define _AX88796C_MAIN_H
+
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+
+#include "ax88796c_spi.h"
+
+/* These identify the driver base version and may not be removed. */
+#define DRV_NAME "ax88796c"
+#define ADP_NAME "ASIX AX88796C SPI Ethernet Adapter"
+
+#define TX_QUEUE_HIGH_WATER 45 /* Tx queue high water mark */
+#define TX_QUEUE_LOW_WATER 20 /* Tx queue low water mark */
+
+#define AX88796C_REGDUMP_LEN 256
+#define AX88796C_PHY_REGDUMP_LEN 14
+#define AX88796C_PHY_ID 0x10
+
+#define TX_OVERHEAD 8
+#define TX_EOP_SIZE 4
+
+#define AX_MCAST_FILTER_SIZE 8
+#define AX_MAX_MCAST 64
+#define AX_MAX_CLK 80000000
+#define TX_HDR_SOP_DICF 0x8000
+#define TX_HDR_SOP_CPHI 0x4000
+#define TX_HDR_SOP_INT 0x2000
+#define TX_HDR_SOP_MDEQ 0x1000
+#define TX_HDR_SOP_PKTLEN 0x07FF
+#define TX_HDR_SOP_SEQNUM 0xF800
+#define TX_HDR_SOP_PKTLENBAR 0x07FF
+
+#define TX_HDR_SEG_FS 0x8000
+#define TX_HDR_SEG_LS 0x4000
+#define TX_HDR_SEG_SEGNUM 0x3800
+#define TX_HDR_SEG_SEGLEN 0x0700
+#define TX_HDR_SEG_EOFST 0xC000
+#define TX_HDR_SEG_SOFST 0x3800
+#define TX_HDR_SEG_SEGLENBAR 0x07FF
+
+#define TX_HDR_EOP_SEQNUM 0xF800
+#define TX_HDR_EOP_PKTLEN 0x07FF
+#define TX_HDR_EOP_SEQNUMBAR 0xF800
+#define TX_HDR_EOP_PKTLENBAR 0x07FF
+
+/* Rx header fields mask */
+#define RX_HDR1_MCBC 0x8000
+#define RX_HDR1_STUFF_PKT 0x4000
+#define RX_HDR1_MII_ERR 0x2000
+#define RX_HDR1_CRC_ERR 0x1000
+#define RX_HDR1_PKT_LEN 0x07FF
+
+#define RX_HDR2_SEQ_NUM 0xF800
+#define RX_HDR2_PKT_LEN_BAR 0x07FF
+
+#define RX_HDR3_PE 0x8000
+#define RX_HDR3_L3_TYPE_IPV4V6 0x6000
+#define RX_HDR3_L3_TYPE_IP 0x4000
+#define RX_HDR3_L3_TYPE_IPV6 0x2000
+#define RX_HDR3_L4_TYPE_ICMPV6 0x1400
+#define RX_HDR3_L4_TYPE_TCP 0x1000
+#define RX_HDR3_L4_TYPE_IGMP 0x0c00
+#define RX_HDR3_L4_TYPE_ICMP 0x0800
+#define RX_HDR3_L4_TYPE_UDP 0x0400
+#define RX_HDR3_L3_ERR 0x0200
+#define RX_HDR3_L4_ERR 0x0100
+#define RX_HDR3_PRIORITY(x) ((x) << 4)
+#define RX_HDR3_STRIP 0x0008
+#define RX_HDR3_VLAN_ID 0x0007
+
+struct ax88796c_pcpu_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 rx_frame_errors;
+ u32 rx_crc_errors;
+};
+
+struct ax88796c_device {
+ struct spi_device *spi;
+ struct net_device *ndev;
+ struct ax88796c_pcpu_stats __percpu *stats;
+
+ struct work_struct ax_work;
+
+ struct mutex spi_lock; /* device access */
+
+ struct sk_buff_head tx_wait_q;
+
+ struct axspi_data ax_spi;
+
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+
+ int msg_enable;
+
+ u16 seq_num;
+
+ u8 multi_filter[AX_MCAST_FILTER_SIZE];
+
+ int link;
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+ int flowctrl;
+ #define AX_FC_NONE 0
+ #define AX_FC_RX BIT(0)
+ #define AX_FC_TX BIT(1)
+ #define AX_FC_ANEG BIT(2)
+
+ u32 priv_flags;
+ #define AX_CAP_COMP BIT(0)
+ #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP)
+
+ unsigned long flags;
+ #define EVENT_INTR BIT(0)
+ #define EVENT_TX BIT(1)
+ #define EVENT_SET_MULTI BIT(2)
+};
+
+#define to_ax88796c_device(ndev) ((struct ax88796c_device *)netdev_priv(ndev))
+
+enum skb_state {
+ illegal = 0,
+ tx_done,
+ rx_done,
+ rx_err,
+};
+
+struct skb_data {
+ enum skb_state state;
+ size_t len;
+};
+
+/* A88796C register definition */
+ /* Definition of PAGE0 */
+#define P0_PSR (0x00)
+ #define PSR_DEV_READY BIT(7)
+ #define PSR_RESET (0 << 15)
+ #define PSR_RESET_CLR BIT(15)
+#define P0_BOR (0x02)
+#define P0_FER (0x04)
+ #define FER_IPALM BIT(0)
+ #define FER_DCRC BIT(1)
+ #define FER_RH3M BIT(2)
+ #define FER_HEADERSWAP BIT(7)
+ #define FER_WSWAP BIT(8)
+ #define FER_BSWAP BIT(9)
+ #define FER_INTHI BIT(10)
+ #define FER_INTLO (0 << 10)
+ #define FER_IRQ_PULL BIT(11)
+ #define FER_RXEN BIT(14)
+ #define FER_TXEN BIT(15)
+#define P0_ISR (0x06)
+ #define ISR_RXPKT BIT(0)
+ #define ISR_MDQ BIT(4)
+ #define ISR_TXT BIT(5)
+ #define ISR_TXPAGES BIT(6)
+ #define ISR_TXERR BIT(8)
+ #define ISR_LINK BIT(9)
+#define P0_IMR (0x08)
+ #define IMR_RXPKT BIT(0)
+ #define IMR_MDQ BIT(4)
+ #define IMR_TXT BIT(5)
+ #define IMR_TXPAGES BIT(6)
+ #define IMR_TXERR BIT(8)
+ #define IMR_LINK BIT(9)
+ #define IMR_MASKALL (0xFFFF)
+ #define IMR_DEFAULT (IMR_TXERR)
+#define P0_WFCR (0x0A)
+ #define WFCR_PMEIND BIT(0) /* PME indication */
+ #define WFCR_PMETYPE BIT(1) /* PME I/O type */
+ #define WFCR_PMEPOL BIT(2) /* PME polarity */
+ #define WFCR_PMERST BIT(3) /* Reset PME */
+ #define WFCR_SLEEP BIT(4) /* Enable sleep mode */
+ #define WFCR_WAKEUP BIT(5) /* Enable wakeup mode */
+ #define WFCR_WAITEVENT BIT(6) /* Reserved */
+ #define WFCR_CLRWAKE BIT(7) /* Clear wakeup */
+ #define WFCR_LINKCH BIT(8) /* Enable link change */
+ #define WFCR_MAGICP BIT(9) /* Enable magic packet */
+ #define WFCR_WAKEF BIT(10) /* Enable wakeup frame */
+ #define WFCR_PMEEN BIT(11) /* Enable PME pin */
+ #define WFCR_LINKCHS BIT(12) /* Link change status */
+ #define WFCR_MAGICPS BIT(13) /* Magic packet status */
+ #define WFCR_WAKEFS BIT(14) /* Wakeup frame status */
+ #define WFCR_PMES BIT(15) /* PME pin status */
+#define P0_PSCR (0x0C)
+ #define PSCR_PS_MASK (0xFFF0)
+ #define PSCR_PS_D0 (0)
+ #define PSCR_PS_D1 BIT(0)
+ #define PSCR_PS_D2 BIT(1)
+ #define PSCR_FPS BIT(3) /* Enable fiber mode PS */
+ #define PSCR_SWPS BIT(4) /* Enable software */
+ /* PS control */
+ #define PSCR_WOLPS BIT(5) /* Enable WOL PS */
+ #define PSCR_SWWOL BIT(6) /* Enable software select */
+ /* WOL PS */
+ #define PSCR_PHYOSC BIT(7) /* Internal PHY OSC control */
+ #define PSCR_FOFEF BIT(8) /* Force PHY generate FEF */
+ #define PSCR_FOF BIT(9) /* Force PHY in fiber mode */
+ #define PSCR_PHYPD BIT(10) /* PHY power down. */
+ /* Active high */
+ #define PSCR_PHYRST BIT(11) /* PHY reset signal. */
+ /* Active low */
+ #define PSCR_PHYCSIL BIT(12) /* PHY cable energy detect */
+ #define PSCR_PHYCOFF BIT(13) /* PHY cable off */
+ #define PSCR_PHYLINK BIT(14) /* PHY link status */
+ #define PSCR_EEPOK BIT(15) /* EEPROM load complete */
+#define P0_MACCR (0x0E)
+ #define MACCR_RXEN BIT(0) /* Enable RX */
+ #define MACCR_DUPLEX_FULL BIT(1) /* 1: Full, 0: Half */
+ #define MACCR_SPEED_100 BIT(2) /* 1: 100Mbps, 0: 10Mbps */
+ #define MACCR_RXFC_ENABLE BIT(3)
+ #define MACCR_RXFC_MASK 0xFFF7
+ #define MACCR_TXFC_ENABLE BIT(4)
+ #define MACCR_TXFC_MASK 0xFFEF
+ #define MACCR_PSI BIT(6) /* Software Cable-Off */
+ /* Power Saving Interrupt */
+ #define MACCR_PF BIT(7)
+ #define MACCR_PMM_BITS 8
+ #define MACCR_PMM_MASK (0x1F00)
+ #define MACCR_PMM_RESET BIT(8)
+ #define MACCR_PMM_WAIT (2 << 8)
+ #define MACCR_PMM_READY (3 << 8)
+ #define MACCR_PMM_D1 (4 << 8)
+ #define MACCR_PMM_D2 (5 << 8)
+ #define MACCR_PMM_WAKE (7 << 8)
+ #define MACCR_PMM_D1_WAKE (8 << 8)
+ #define MACCR_PMM_D2_WAKE (9 << 8)
+ #define MACCR_PMM_SLEEP (10 << 8)
+ #define MACCR_PMM_PHY_RESET (11 << 8)
+ #define MACCR_PMM_SOFT_D1 (16 << 8)
+ #define MACCR_PMM_SOFT_D2 (17 << 8)
+#define P0_TFBFCR (0x10)
+ #define TFBFCR_SCHE_FREE_PAGE 0xE07F
+ #define TFBFCR_FREE_PAGE_BITS 0x07
+ #define TFBFCR_FREE_PAGE_LATCH BIT(6)
+ #define TFBFCR_SET_FREE_PAGE(x) (((x) & 0x3F) << TFBFCR_FREE_PAGE_BITS)
+ #define TFBFCR_TX_PAGE_SET BIT(13)
+ #define TFBFCR_MANU_ENTX BIT(15)
+ #define TX_FREEBUF_MASK 0x003F
+ #define TX_DPTSTART 0x4000
+
+#define P0_TSNR (0x12)
+ #define TXNR_TXB_ERR BIT(5)
+ #define TXNR_TXB_IDLE BIT(6)
+ #define TSNR_PKT_CNT(x) (((x) & 0x3F) << 8)
+ #define TXNR_TXB_REINIT BIT(14)
+ #define TSNR_TXB_START BIT(15)
+#define P0_RTDPR (0x14)
+#define P0_RXBCR1 (0x16)
+ #define RXBCR1_RXB_DISCARD BIT(14)
+ #define RXBCR1_RXB_START BIT(15)
+#define P0_RXBCR2 (0x18)
+ #define RXBCR2_PKT_MASK (0xFF)
+ #define RXBCR2_RXPC_MASK (0x7F)
+ #define RXBCR2_RXB_READY BIT(13)
+ #define RXBCR2_RXB_IDLE BIT(14)
+ #define RXBCR2_RXB_REINIT BIT(15)
+#define P0_RTWCR (0x1A)
+ #define RTWCR_RXWC_MASK (0x3FFF)
+ #define RTWCR_RX_LATCH BIT(15)
+#define P0_RCPHR (0x1C)
+
+ /* Definition of PAGE1 */
+#define P1_RPPER (0x22)
+ #define RPPER_RXEN BIT(0)
+#define P1_MRCR (0x28)
+#define P1_MDR (0x2A)
+#define P1_RMPR (0x2C)
+#define P1_TMPR (0x2E)
+#define P1_RXBSPCR (0x30)
+ #define RXBSPCR_STUF_WORD_CNT(x) (((x) & 0x7000) >> 12)
+ #define RXBSPCR_STUF_ENABLE BIT(15)
+#define P1_MCR (0x32)
+ #define MCR_SBP BIT(8)
+ #define MCR_SM BIT(9)
+ #define MCR_CRCENLAN BIT(11)
+ #define MCR_STP BIT(12)
+ /* Definition of PAGE2 */
+#define P2_CIR (0x42)
+#define P2_PCR (0x44)
+ #define PCR_POLL_EN BIT(0)
+ #define PCR_POLL_FLOWCTRL BIT(1)
+ #define PCR_POLL_BMCR BIT(2)
+ #define PCR_PHYID(x) ((x) << 8)
+#define P2_PHYSR (0x46)
+#define P2_MDIODR (0x48)
+#define P2_MDIOCR (0x4A)
+ #define MDIOCR_RADDR(x) ((x) & 0x1F)
+ #define MDIOCR_FADDR(x) (((x) & 0x1F) << 8)
+ #define MDIOCR_VALID BIT(13)
+ #define MDIOCR_READ BIT(14)
+ #define MDIOCR_WRITE BIT(15)
+#define P2_LCR0 (0x4C)
+ #define LCR_LED0_EN BIT(0)
+ #define LCR_LED0_100MODE BIT(1)
+ #define LCR_LED0_DUPLEX BIT(2)
+ #define LCR_LED0_LINK BIT(3)
+ #define LCR_LED0_ACT BIT(4)
+ #define LCR_LED0_COL BIT(5)
+ #define LCR_LED0_10MODE BIT(6)
+ #define LCR_LED0_DUPCOL BIT(7)
+ #define LCR_LED1_EN BIT(8)
+ #define LCR_LED1_100MODE BIT(9)
+ #define LCR_LED1_DUPLEX BIT(10)
+ #define LCR_LED1_LINK BIT(11)
+ #define LCR_LED1_ACT BIT(12)
+ #define LCR_LED1_COL BIT(13)
+ #define LCR_LED1_10MODE BIT(14)
+ #define LCR_LED1_DUPCOL BIT(15)
+#define P2_LCR1 (0x4E)
+ #define LCR_LED2_MASK (0xFF00)
+ #define LCR_LED2_EN BIT(0)
+ #define LCR_LED2_100MODE BIT(1)
+ #define LCR_LED2_DUPLEX BIT(2)
+ #define LCR_LED2_LINK BIT(3)
+ #define LCR_LED2_ACT BIT(4)
+ #define LCR_LED2_COL BIT(5)
+ #define LCR_LED2_10MODE BIT(6)
+ #define LCR_LED2_DUPCOL BIT(7)
+#define P2_IPGCR (0x50)
+#define P2_CRIR (0x52)
+#define P2_FLHWCR (0x54)
+#define P2_RXCR (0x56)
+ #define RXCR_PRO BIT(0)
+ #define RXCR_AMALL BIT(1)
+ #define RXCR_SEP BIT(2)
+ #define RXCR_AB BIT(3)
+ #define RXCR_AM BIT(4)
+ #define RXCR_AP BIT(5)
+ #define RXCR_ARP BIT(6)
+#define P2_JLCR (0x58)
+#define P2_MPLR (0x5C)
+
+ /* Definition of PAGE3 */
+#define P3_MACASR0 (0x62)
+ #define P3_MACASR(x) (P3_MACASR0 + 2 * (x))
+ #define MACASR_LOWBYTE_MASK 0x00FF
+ #define MACASR_HIGH_BITS 0x08
+#define P3_MACASR1 (0x64)
+#define P3_MACASR2 (0x66)
+#define P3_MFAR01 (0x68)
+#define P3_MFAR_BASE (0x68)
+ #define P3_MFAR(x) (P3_MFAR_BASE + 2 * (x))
+
+#define P3_MFAR23 (0x6A)
+#define P3_MFAR45 (0x6C)
+#define P3_MFAR67 (0x6E)
+#define P3_VID0FR (0x70)
+#define P3_VID1FR (0x72)
+#define P3_EECSR (0x74)
+#define P3_EEDR (0x76)
+#define P3_EECR (0x78)
+ #define EECR_ADDR_MASK (0x00FF)
+ #define EECR_READ_ACT BIT(8)
+ #define EECR_WRITE_ACT BIT(9)
+ #define EECR_WRITE_DISABLE BIT(10)
+ #define EECR_WRITE_ENABLE BIT(11)
+ #define EECR_EE_READY BIT(13)
+ #define EECR_RELOAD BIT(14)
+ #define EECR_RESET BIT(15)
+#define P3_TPCR (0x7A)
+ #define TPCR_PATT_MASK (0xFF)
+ #define TPCR_RAND_PKT_EN BIT(14)
+ #define TPCR_FIXED_PKT_EN BIT(15)
+#define P3_TPLR (0x7C)
+ /* Definition of PAGE4 */
+#define P4_SPICR (0x8A)
+ #define SPICR_RCEN BIT(0)
+ #define SPICR_QCEN BIT(1)
+ #define SPICR_RBRE BIT(3)
+ #define SPICR_PMM BIT(4)
+ #define SPICR_LOOPBACK BIT(8)
+ #define SPICR_CORE_RES_CLR BIT(10)
+ #define SPICR_SPI_RES_CLR BIT(11)
+#define P4_SPIISMR (0x8C)
+
+#define P4_COERCR0 (0x92)
+ #define COERCR0_RXIPCE BIT(0)
+ #define COERCR0_RXIPVE BIT(1)
+ #define COERCR0_RXV6PE BIT(2)
+ #define COERCR0_RXTCPE BIT(3)
+ #define COERCR0_RXUDPE BIT(4)
+ #define COERCR0_RXICMP BIT(5)
+ #define COERCR0_RXIGMP BIT(6)
+ #define COERCR0_RXICV6 BIT(7)
+
+ #define COERCR0_RXTCPV6 BIT(8)
+ #define COERCR0_RXUDPV6 BIT(9)
+ #define COERCR0_RXICMV6 BIT(10)
+ #define COERCR0_RXIGMV6 BIT(11)
+ #define COERCR0_RXICV6V6 BIT(12)
+
+ #define COERCR0_DEFAULT (COERCR0_RXIPCE | COERCR0_RXV6PE | \
+ COERCR0_RXTCPE | COERCR0_RXUDPE | \
+ COERCR0_RXTCPV6 | COERCR0_RXUDPV6)
+#define P4_COERCR1 (0x94)
+ #define COERCR1_IPCEDP BIT(0)
+ #define COERCR1_IPVEDP BIT(1)
+ #define COERCR1_V6VEDP BIT(2)
+ #define COERCR1_TCPEDP BIT(3)
+ #define COERCR1_UDPEDP BIT(4)
+ #define COERCR1_ICMPDP BIT(5)
+ #define COERCR1_IGMPDP BIT(6)
+ #define COERCR1_ICV6DP BIT(7)
+ #define COERCR1_RX64TE BIT(8)
+ #define COERCR1_RXPPPE BIT(9)
+ #define COERCR1_TCP6DP BIT(10)
+ #define COERCR1_UDP6DP BIT(11)
+ #define COERCR1_IC6DP BIT(12)
+ #define COERCR1_IG6DP BIT(13)
+ #define COERCR1_ICV66DP BIT(14)
+ #define COERCR1_RPCE BIT(15)
+
+ #define COERCR1_DEFAULT (COERCR1_RXPPPE)
+
+#define P4_COETCR0 (0x96)
+ #define COETCR0_TXIP BIT(0)
+ #define COETCR0_TXTCP BIT(1)
+ #define COETCR0_TXUDP BIT(2)
+ #define COETCR0_TXICMP BIT(3)
+ #define COETCR0_TXIGMP BIT(4)
+ #define COETCR0_TXICV6 BIT(5)
+ #define COETCR0_TXTCPV6 BIT(8)
+ #define COETCR0_TXUDPV6 BIT(9)
+ #define COETCR0_TXICMV6 BIT(10)
+ #define COETCR0_TXIGMV6 BIT(11)
+ #define COETCR0_TXICV6V6 BIT(12)
+
+ #define COETCR0_DEFAULT (COETCR0_TXIP | COETCR0_TXTCP | \
+ COETCR0_TXUDP | COETCR0_TXTCPV6 | \
+ COETCR0_TXUDPV6)
+#define P4_COETCR1 (0x98)
+ #define COETCR1_TX64TE BIT(0)
+ #define COETCR1_TXPPPE BIT(1)
+
+#define P4_COECEDR (0x9A)
+#define P4_L2CECR (0x9C)
+
+ /* Definition of PAGE5 */
+#define P5_WFTR (0xA2)
+ #define WFTR_2MS (0x01)
+ #define WFTR_4MS (0x02)
+ #define WFTR_8MS (0x03)
+ #define WFTR_16MS (0x04)
+ #define WFTR_32MS (0x05)
+ #define WFTR_64MS (0x06)
+ #define WFTR_128MS (0x07)
+ #define WFTR_256MS (0x08)
+ #define WFTR_512MS (0x09)
+ #define WFTR_1024MS (0x0A)
+ #define WFTR_2048MS (0x0B)
+ #define WFTR_4096MS (0x0C)
+ #define WFTR_8192MS (0x0D)
+ #define WFTR_16384MS (0x0E)
+ #define WFTR_32768MS (0x0F)
+#define P5_WFCCR (0xA4)
+#define P5_WFCR03 (0xA6)
+ #define WFCR03_F0_EN BIT(0)
+ #define WFCR03_F1_EN BIT(4)
+ #define WFCR03_F2_EN BIT(8)
+ #define WFCR03_F3_EN BIT(12)
+#define P5_WFCR47 (0xA8)
+ #define WFCR47_F4_EN BIT(0)
+ #define WFCR47_F5_EN BIT(4)
+ #define WFCR47_F6_EN BIT(8)
+ #define WFCR47_F7_EN BIT(12)
+#define P5_WF0BMR0 (0xAA)
+#define P5_WF0BMR1 (0xAC)
+#define P5_WF0CR (0xAE)
+#define P5_WF0OBR (0xB0)
+#define P5_WF1BMR0 (0xB2)
+#define P5_WF1BMR1 (0xB4)
+#define P5_WF1CR (0xB6)
+#define P5_WF1OBR (0xB8)
+#define P5_WF2BMR0 (0xBA)
+#define P5_WF2BMR1 (0xBC)
+
+ /* Definition of PAGE6 */
+#define P6_WF2CR (0xC2)
+#define P6_WF2OBR (0xC4)
+#define P6_WF3BMR0 (0xC6)
+#define P6_WF3BMR1 (0xC8)
+#define P6_WF3CR (0xCA)
+#define P6_WF3OBR (0xCC)
+#define P6_WF4BMR0 (0xCE)
+#define P6_WF4BMR1 (0xD0)
+#define P6_WF4CR (0xD2)
+#define P6_WF4OBR (0xD4)
+#define P6_WF5BMR0 (0xD6)
+#define P6_WF5BMR1 (0xD8)
+#define P6_WF5CR (0xDA)
+#define P6_WF5OBR (0xDC)
+
+/* Definition of PAGE7 */
+#define P7_WF6BMR0 (0xE2)
+#define P7_WF6BMR1 (0xE4)
+#define P7_WF6CR (0xE6)
+#define P7_WF6OBR (0xE8)
+#define P7_WF7BMR0 (0xEA)
+#define P7_WF7BMR1 (0xEC)
+#define P7_WF7CR (0xEE)
+#define P7_WF7OBR (0xF0)
+#define P7_WFR01 (0xF2)
+#define P7_WFR23 (0xF4)
+#define P7_WFR45 (0xF6)
+#define P7_WFR67 (0xF8)
+#define P7_WFPC0 (0xFA)
+#define P7_WFPC1 (0xFC)
+
+/* Tx headers structure */
+struct tx_sop_header {
+ /* bit 15-11: flags, bit 10-0: packet length */
+ u16 flags_len;
+ /* bit 15-11: sequence number, bit 10-0: packet length bar */
+ u16 seq_lenbar;
+};
+
+struct tx_segment_header {
+ /* bit 15-14: flags, bit 13-11: segment number */
+ /* bit 10-0: segment length */
+ u16 flags_seqnum_seglen;
+ /* bit 15-14: end offset, bit 13-11: start offset */
+ /* bit 10-0: segment length bar */
+ u16 eo_so_seglenbar;
+};
+
+struct tx_eop_header {
+ /* bit 15-11: sequence number, bit 10-0: packet length */
+ u16 seq_len;
+ /* bit 15-11: sequence number bar, bit 10-0: packet length bar */
+ u16 seqbar_lenbar;
+};
+
+struct tx_pkt_info {
+ struct tx_sop_header sop;
+ struct tx_segment_header seg;
+ struct tx_eop_header eop;
+ u16 pkt_len;
+ u16 seq_num;
+};
+
+/* Rx headers structure */
+struct rx_header {
+ u16 flags_len;
+ u16 seq_lenbar;
+ u16 flags;
+};
+
+extern unsigned long ax88796c_no_regs_mask[];
+
+#endif /* #ifndef _AX88796C_MAIN_H */
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c
new file mode 100644
index 000000000000..94df4f96d2be
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/string.h>
+#include <linux/spi/spi.h>
+
+#include "ax88796c_spi.h"
+
+const u8 ax88796c_rx_cmd_buf[5] = {AX_SPICMD_READ_RXQ, 0xFF, 0xFF, 0xFF, 0xFF};
+const u8 ax88796c_tx_cmd_buf[4] = {AX_SPICMD_WRITE_TXQ, 0xFF, 0xFF, 0xFF};
+
+/* driver bus management functions */
+int axspi_wakeup(struct axspi_data *ax_spi)
+{
+ int ret;
+
+ ax_spi->cmd_buf[0] = AX_SPICMD_EXIT_PWD; /* OP */
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 1);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
+
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
+{
+ int ret;
+
+ /* OP */
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
+ ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ else
+ le16_to_cpus(&status->isr);
+
+ return ret;
+}
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len)
+{
+ struct spi_transfer *xfer = ax_spi->spi_rx_xfer;
+ int ret;
+
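+ /* Chain two transfers: the first clocks out the read-RXQ command
+ * (shortened to 2 bytes in compressed mode), the second clocks
+ * the packet data in.
+ */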
+ memcpy(ax_spi->cmd_buf, ax88796c_rx_cmd_buf, 5);
+
+ xfer->tx_buf = ax_spi->cmd_buf;
+ xfer->rx_buf = NULL;
+ xfer->len = ax_spi->comp ? 2 : 5;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+
+ xfer++;
+ xfer->rx_buf = data;
+ xfer->tx_buf = NULL;
+ xfer->len = len;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+ ret = spi_sync(ax_spi->spi, &ax_spi->rx_msg);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+
+ return ret;
+}
+
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len)
+{
+ return spi_write(ax_spi->spi, data, len);
+}
+
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg)
+{
+ int ret;
+ int len = ax_spi->comp ? 3 : 4;
+
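+ /* Compressed mode needs only one dummy byte after the register
+ * address instead of two, hence the shorter command.
+ */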
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_REG; /* OP code read register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = 0xFF; /* dummy cycle */
+ ax_spi->cmd_buf[3] = 0xFF; /* dummy cycle */
+ ret = spi_write_then_read(ax_spi->spi,
+ ax_spi->cmd_buf, len,
+ ax_spi->rx_buf, 2);
+ if (ret) {
+ dev_err(&ax_spi->spi->dev,
+ "%s() failed: ret = %d\n", __func__, ret);
+ return 0xFFFF;
+ }
+
+ le16_to_cpus((u16 *)ax_spi->rx_buf);
+
+ return *(u16 *)ax_spi->rx_buf;
+}
+
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value)
+{
+ int ret;
+
+ memset(ax_spi->cmd_buf, 0, sizeof(ax_spi->cmd_buf));
+ ax_spi->cmd_buf[0] = AX_SPICMD_WRITE_REG; /* OP code write register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = value;
+ ax_spi->cmd_buf[3] = value >> 8;
+
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 4);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.h b/drivers/net/ethernet/asix/ax88796c_spi.h
new file mode 100644
index 000000000000..5bcf91f603fb
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_SPI_H
+#define _AX88796C_SPI_H
+
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/* Definition of SPI command */
+#define AX_SPICMD_WRITE_TXQ 0x02
+#define AX_SPICMD_READ_REG 0x03
+#define AX_SPICMD_READ_STATUS 0x05
+#define AX_SPICMD_READ_RXQ 0x0B
+#define AX_SPICMD_BIDIR_WRQ 0xB2
+#define AX_SPICMD_WRITE_REG 0xD8
+#define AX_SPICMD_EXIT_PWD 0xAB
+
+extern const u8 ax88796c_rx_cmd_buf[];
+extern const u8 ax88796c_tx_cmd_buf[];
+
+struct axspi_data {
+ struct spi_device *spi;
+ struct spi_message rx_msg;
+ struct spi_transfer spi_rx_xfer[2];
+ u8 cmd_buf[6];
+ u8 rx_buf[6];
+ u8 comp;
+};
+
+struct spi_status {
+ u16 isr;
+ u8 status;
+# define AX_STATUS_READY 0x80
+};
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len);
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len);
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg);
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value);
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status);
+int axspi_wakeup(struct axspi_data *ax_spi);
+
+static inline u16 AX_READ(struct axspi_data *ax_spi, u8 offset)
+{
+ return axspi_read_reg(ax_spi, offset);
+}
+
+static inline int AX_WRITE(struct axspi_data *ax_spi, u16 value, u8 offset)
+{
+ return axspi_write_reg(ax_spi, offset, value);
+}
+
+static inline int AX_READ_STATUS(struct axspi_data *ax_spi,
+ struct spi_status *status)
+{
+ return axspi_read_status(ax_spi, status);
+}
+
+static inline int AX_WAKEUP(struct axspi_data *ax_spi)
+{
+ return axspi_wakeup(ax_spi);
+}
+#endif
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 02ae98aabf91..ada3a9f0c8c8 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1968,10 +1968,10 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
- err = of_get_mac_address(np, ndev->dev_addr);
+ err = of_get_ethdev_address(np, ndev);
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
}
err = of_get_phy_mode(np, &ag->phy_if_mode);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ea157efca86..4ad3fc72e74e 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -607,7 +607,7 @@ static int alx_set_mac_address(struct net_device *netdev, void *data)
if (netdev->addr_assign_type & NET_ADDR_RANDOM)
netdev->addr_assign_type ^= NET_ADDR_RANDOM;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
alx_set_macaddr(hw, hw->mac_addr);
@@ -1832,7 +1832,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
- memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, hw->mac_addr);
memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
hw->mdio.prtad = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 3b51b172b317..da595242bc13 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -482,7 +482,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
@@ -1847,7 +1847,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
buffer_info->skb = NULL;
buffer_info->length = 0;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
- netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+ netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed");
break;
}
buffer_info->dma = mapping;
@@ -2662,10 +2662,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* The atl1c chip can DMA to 64-bit addresses, but it uses a single
@@ -2769,7 +2767,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 753973ac922e..56e5f440e666 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -374,7 +374,7 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1e_hw_set_mac_addr(&adapter->hw);
@@ -2297,10 +2297,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err = 0;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* The atl1e chip can DMA to 64-bit addresses, but it uses a single
@@ -2392,7 +2390,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 68f6c0bbd945..b4c9e805e981 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3027,7 +3027,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* mark random mac */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index b69298ddb647..bbc4d7b08a49 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -931,7 +931,7 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl2_set_mac_addr(&adapter->hw);
@@ -1405,7 +1405,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
goto err_eeprom;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 0941d07d0833..e8cfbf4ff1b5 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -69,7 +69,7 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atlx_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index fa784953c601..969591bbc066 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -218,7 +218,8 @@ static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index
data[1] = (val >> 0) & 0xFF;
}
-static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+static inline void __b44_cam_write(struct b44 *bp,
+ const unsigned char *data, int index)
{
u32 val;
@@ -1200,7 +1201,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
&bp->rx_ring_dma, gfp);
if (!bp->rx_ring) {
- /* Allocation may have failed due to pci_alloc_consistent
+ /* Allocation may have failed due to dma_alloc_coherent
insisting on use of GFP_DMA, which is more restrictive
than necessary... */
struct dma_desc *rx_ring;
@@ -1383,7 +1384,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&bp->lock);
@@ -1507,7 +1508,8 @@ static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
}
}
-static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
+ int offset)
{
int magicsync = 6;
int k, j, len = offset;
@@ -2171,7 +2173,7 @@ static int b44_get_invariants(struct b44 *bp)
* valid PHY address. */
bp->phy_addr &= 0x1F;
- memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, addr);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 02a569500234..7cc5213c575a 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -170,7 +170,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
goto err_free_buf_descs;
}
- ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+ ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
if (!ring->slots)
goto err_free_buf_descs;
@@ -715,7 +715,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
return err;
SET_NETDEV_DEV(netdev, &pdev->dev);
- err = of_get_mac_address(dev->of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d56886300ecf..a568994a03a6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -670,7 +670,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
u32 val;
priv = netdev_priv(dev);
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
/* use perfect match register 0 to store my mac address */
val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
@@ -1762,7 +1762,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
priv->has_phy = pd->has_phy;
priv->phy_id = pd->phy_id;
priv->has_phy_interrupt = pd->has_phy_interrupt;
@@ -2665,7 +2665,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
memcpy(priv->used_ports, pd->used_ports,
sizeof(pd->used_ports));
priv->num_ports = pd->num_ports;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7fa1b695400d..40933bf5a710 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1818,7 +1818,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
addr[3];
@@ -1850,7 +1850,7 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* interface is disabled, changes to MAC will be reflected on next
* open call
@@ -2555,7 +2555,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
}
/* Initialize netdevice members */
- ret = of_get_mac_address(dn, dev->dev_addr);
+ ret = of_get_ethdev_address(dn, dev);
if (ret) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 6ce80cbcb48e..086739e4f40a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -10,6 +10,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
+#include <linux/of_mdio.h>
#include "bgmac.h"
static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
@@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
{
struct bcma_device *core = bgmac->bcma.core;
struct mii_bus *mii_bus;
+ struct device_node *np;
int err;
mii_bus = mdiobus_alloc();
@@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
mii_bus->parent = &core->dev;
mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
- err = mdiobus_register(mii_bus);
+ np = of_get_child_by_name(core->dev.of_node, "mdio");
+
+ err = of_mdiobus_register(mii_bus, np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 9513cfb5ba58..e6f48786949c 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "bgmac.h"
@@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac *bgmac)
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
+ /* DT info should be the most accurate */
+ phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node,
+ bgmac_adjust_link);
+ if (phy_dev)
+ return 0;
+
/* Connect to the PHY */
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
- bgmac->phyaddr);
- phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connection failed\n");
- return PTR_ERR(phy_dev);
+ if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ dev_err(bgmac->dev, "PHY connection failed\n");
+ return PTR_ERR(phy_dev);
+ }
+
+ return 0;
}
- return 0;
+ /* Assume a fixed link to the switch port */
+ return bgmac_phy_connect_direct(bgmac);
}
static const struct bcma_device_id bgmac_bcma_tbl[] = {
@@ -128,7 +140,7 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+ err = of_get_ethdev_address(bgmac->dev->of_node, bgmac->net_dev);
if (err == -EPROBE_DEFER)
return err;
@@ -150,7 +162,7 @@ static int bgmac_probe(struct bcma_device *core)
err = -ENOTSUPP;
goto err;
}
- ether_addr_copy(bgmac->net_dev->dev_addr, mac);
+ eth_hw_addr_set(bgmac->net_dev, mac);
}
/* On BCM4706 we need common core to access PHY */
@@ -297,10 +309,7 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = bcma_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32;
- if (bgmac->mii_bus)
- bgmac->phy_connect = bcma_phy_connect;
- else
- bgmac->phy_connect = bgmac_phy_connect_direct;
+ bgmac->phy_connect = bcma_phy_connect;
err = bgmac_enet_probe(bgmac);
if (err)
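Note: bcma_phy_connect() now owns the whole PHY attachment policy instead of being selected at probe time, which is why the mii_bus check moved into it. The resulting priority order, restated as a hedged sketch (names ending in _sketch are stand-ins, not driver symbols):

static int mdio_phy_connect_sketch(struct bgmac *bgmac)
{
	return 0;	/* hypothetical: snprintf() the bus_id, then phy_connect() */
}

static int phy_connect_sketch(struct bgmac *bgmac)
{
	/* 1. A PHY described in Device Tree is the most accurate. */
	if (of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node,
				   bgmac_adjust_link))
		return 0;

	/* 2. Otherwise a real PHY on the bcma MDIO bus, if one exists. */
	if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS)
		return mdio_phy_connect_sketch(bgmac);

	/* 3. Last resort: a fixed link to an internal switch port. */
	return bgmac_phy_connect_direct(bgmac);
}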
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index df8ff839cc62..c6412c523637 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -191,7 +191,7 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dev = &pdev->dev;
bgmac->dma_dev = &pdev->dev;
- ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+ ret = of_get_ethdev_address(np, bgmac->net_dev);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fe4d99abd548..7b525c65bacb 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -768,7 +768,7 @@ static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
udelay(2);
}
-static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+static void bgmac_write_mac_address(struct bgmac *bgmac, const u8 *addr)
{
u32 tmp;
@@ -1241,7 +1241,7 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
if (ret < 0)
return ret;
- ether_addr_copy(net_dev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(net_dev, sa->sa_data);
bgmac_write_mac_address(bgmac, net_dev->dev_addr);
eth_commit_mac_addr_change(net_dev, addr);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8c83973adca5..babc955ba64e 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2704,7 +2704,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
}
static void
-bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
+bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
{
u32 val;
@@ -7910,7 +7910,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
@@ -8574,7 +8574,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_kdump_kernel())
bnx2_wait_dma_complete(bp);
- memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, bp->mac_addr);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e789430f407c..2b06d78baa08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1994,7 +1994,7 @@ int bnx2x_idle_chk(struct bnx2x *bp);
* operation has been successfully scheduled and a negative - if a requested
* operations has failed.
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index b5d954cb409a..e8e8c2d593c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4336,7 +4336,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
rc = bnx2x_set_eth_mac(bp, true);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ae87296ae1ff..aec666e97683 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8417,7 +8417,7 @@ alloc_mem_err:
* Init service functions
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags)
{
@@ -9146,7 +9146,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u8 *mac_addr = bp->dev->dev_addr;
+ const u8 *mac_addr = bp->dev->dev_addr;
struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -11790,7 +11790,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* as the SAN mac was copied from the primary MAC.
*/
if (IS_MF_FCOE_AFEX(bp))
- memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, fip_mac);
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11823,9 +11823,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+ u8 addr[ETH_ALEN] = {};
/* Zero primary MAC configuration */
- eth_zero_addr(bp->dev->dev_addr);
+ eth_hw_addr_set(bp->dev, addr);
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -11834,8 +11835,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
+ }
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -11843,7 +11846,8 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
/* in SF read MACs from port configuration */
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -12291,7 +12295,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
if (rc)
return rc;
} else {
- eth_zero_addr(bp->dev->dev_addr);
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(bp->dev, zero_addr);
}
bnx2x_set_modes_bitmap(bp);
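Note: because dev_addr can no longer be composed byte by byte in place, bnx2x builds the address in a stack buffer and commits it once. A generic sketch of that pattern; the byte layout mirrors what bnx2x_set_mac_buf() does with the two shmem words:

#include <linux/etherdevice.h>

static void example_mac_from_words(struct net_device *dev, u32 lo, u32 hi)
{
	u8 addr[ETH_ALEN] = {};		/* compose here, not in dev->dev_addr */

	addr[0] = (hi >> 8) & 0xff;
	addr[1] = hi & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >> 8) & 0xff;
	addr[5] = lo & 0xff;

	eth_hw_addr_set(dev, addr);	/* single tracked write */
}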
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fbf735fca31..74a8931ce1d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -3058,7 +3058,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
!ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
- memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin->mac);
}
if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 966d5722c5e2..8c2cf5519787 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -508,7 +508,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid,
+ bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index ea0e9394f898..c9129b9ba446 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -384,9 +384,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
- memcpy(bp->dev->dev_addr,
- bp->acquire_resp.resc.current_mac_addr,
- ETH_ALEN);
+ eth_hw_addr_set(bp->dev,
+ bp->acquire_resp.resc.current_mac_addr);
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
@@ -722,7 +721,7 @@ out:
}
/* request pf to add a mac for the vf */
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -767,7 +766,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
"vfpf SET MAC failed. Check bulletin board for new posts\n");
/* copy mac from bulletin to device */
- memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin.mac);
/* check if bulletin board was updated */
if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 62f84cc91e4d..66263aa0d96b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4869,7 +4869,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
- u8 *mac_addr)
+ const u8 *mac_addr)
{
struct hwrm_cfa_l2_filter_alloc_output *resp;
struct hwrm_cfa_l2_filter_alloc_input *req;
@@ -6366,7 +6366,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (rx_rings != bp->rx_nr_rings) {
netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
rx_rings, bp->rx_nr_rings);
- if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+ if (netif_is_rxfh_configured(bp->dev) &&
(bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
bnxt_get_max_rss_ring(bp) >= rx_rings)) {
@@ -12369,7 +12369,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (rc)
return rc;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13103,7 +13103,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
int rc = 0;
if (BNXT_PF(bp)) {
- memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -13111,7 +13111,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_addr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, vf->mac_addr);
/* Older PF driver or firmware may not approve this
* correctly.
*/
@@ -13370,7 +13370,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnxt_inv_fw_health_reg(bp);
- bnxt_dl_register(bp);
+ rc = bnxt_dl_register(bp);
+ if (rc)
+ goto init_err_dl;
rc = register_netdev(dev);
if (rc)
@@ -13390,6 +13392,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
init_err_cleanup:
bnxt_dl_unregister(bp);
+init_err_dl:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 9576547df4ab..951c0c00cc95 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -134,7 +134,7 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health)
return;
if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
@@ -188,7 +188,7 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health)
return;
if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
@@ -736,9 +736,6 @@ static const struct devlink_param bnxt_dl_params[] = {
NULL),
};
-static const struct devlink_param bnxt_dl_port_params[] = {
-};
-
static int bnxt_dl_params_register(struct bnxt *bp)
{
int rc;
@@ -748,22 +745,10 @@ static int bnxt_dl_params_register(struct bnxt *bp)
rc = devlink_params_register(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
+ if (rc)
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
- return rc;
- }
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed\n");
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- return rc;
- }
- devlink_params_publish(bp->dl);
-
- return 0;
+ return rc;
}
static void bnxt_dl_params_unregister(struct bnxt *bp)
@@ -773,14 +758,13 @@ static void bnxt_dl_params_unregister(struct bnxt *bp)
devlink_params_unregister(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
- devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
}
int bnxt_dl_register(struct bnxt *bp)
{
const struct devlink_ops *devlink_ops;
struct devlink_port_attrs attrs = {};
+ struct bnxt_dl *bp_dl;
struct devlink *dl;
int rc;
@@ -795,21 +779,17 @@ int bnxt_dl_register(struct bnxt *bp)
return -ENOMEM;
}
- bnxt_link_bp_to_dl(bp, dl);
+ bp->dl = dl;
+ bp_dl = devlink_priv(dl);
+ bp_dl->bp = bp;
/* Add switchdev eswitch mode setting, if SRIOV supported */
if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
bp->hwrm_spec_code > 0x10803)
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- rc = devlink_register(dl);
- if (rc) {
- netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
- goto err_dl_free;
- }
-
if (!BNXT_PF(bp))
- return 0;
+ goto out;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = bp->pf.port_id;
@@ -819,21 +799,20 @@ int bnxt_dl_register(struct bnxt *bp)
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed\n");
- goto err_dl_unreg;
+ goto err_dl_free;
}
rc = bnxt_dl_params_register(bp);
if (rc)
goto err_dl_port_unreg;
+out:
+ devlink_register(dl);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
-err_dl_unreg:
- devlink_unregister(dl);
err_dl_free:
- bnxt_link_bp_to_dl(bp, NULL);
devlink_free(dl);
return rc;
}
@@ -842,13 +821,10 @@ void bnxt_dl_unregister(struct bnxt *bp)
{
struct devlink *dl = bp->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
if (BNXT_PF(bp)) {
bnxt_dl_params_unregister(bp);
devlink_port_unregister(&bp->dl_port);
}
- devlink_unregister(dl);
devlink_free(dl);
}
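Note: the bnxt devlink rework follows a register-last discipline: ports and params are set up first, devlink_register() is the final step, and bnxt_dl_unregister() tears down in exact reverse order. This leans on the concurrent core change that makes devlink_register() infallible, which is why its return value is no longer checked. A sketch of the resulting error handling, assuming that core behavior:

static int example_dl_init(struct devlink *dl, struct bnxt *bp)
{
	int rc;

	rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
	if (rc)
		return rc;	/* instance not visible yet: caller just frees */

	devlink_register(dl);	/* make it visible to user space; cannot fail */
	return 0;
}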
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d889f240da2b..406dc655a5fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -20,19 +20,6 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
-/* To clear devlink pointer from bp, pass NULL dl */
-static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
-{
- bp->dl = dl;
-
- /* add a back pointer in dl to bp */
- if (dl) {
- struct bnxt_dl *bp_dl = devlink_priv(dl);
-
- bp_dl->bp = bp;
- }
-}
-
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7260910e75fb..fbb56b1f70fd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -909,7 +909,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
- (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
+ netif_is_rxfh_configured(dev)) {
netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 70d8ca3039dc..1d177fed44a6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1151,7 +1151,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
}
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
struct hwrm_func_vf_cfg_input *req;
int rc = 0;
@@ -1217,7 +1217,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
- memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
hwrm_req_drop(bp, req);
if (inform_pf)
@@ -1246,7 +1246,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
{
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 995535e4c11b..9a4bacba477b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -41,5 +41,5 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *, bool);
+int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 9401936b74fa..8eb28e088582 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -475,7 +475,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->features |= pf_dev->features;
bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
dev->perm_addr);
- ether_addr_copy(dev->dev_addr, dev->perm_addr);
+ eth_hw_addr_set(dev, dev->perm_addr);
/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
dev->max_mtu = max_mtu;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 23c7595d2a1d..5da9c00b43b1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -935,6 +935,48 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
return 0;
}
+static void bcmgenet_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv;
+ u32 umac_cmd;
+
+ priv = netdev_priv(dev);
+
+ epause->autoneg = priv->autoneg_pause;
+
+ if (netif_carrier_ok(dev)) {
+ /* report active state when link is up */
+ umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
+ epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
+ epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
+ } else {
+ /* otherwise report stored settings */
+ epause->tx_pause = priv->tx_pause;
+ epause->rx_pause = priv->rx_pause;
+ }
+}
+
+static int bcmgenet_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ if (!phy_validate_pause(dev->phydev, epause))
+ return -EINVAL;
+
+ priv->autoneg_pause = !!epause->autoneg;
+ priv->tx_pause = !!epause->tx_pause;
+ priv->rx_pause = !!epause->rx_pause;
+
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
+
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
BCMGENET_STAT_NETDEV = -1,
@@ -1587,6 +1629,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
+ .get_pauseparam = bcmgenet_get_pauseparam,
+ .set_pauseparam = bcmgenet_set_pauseparam,
};
/* Power down the unimac, based on mode. */
@@ -3222,7 +3266,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
@@ -3364,6 +3408,8 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq1;
}
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
@@ -3408,11 +3454,6 @@ static void bcmgenet_netif_stop(struct net_device *dev)
*/
cancel_work_sync(&priv->bcmgenet_irq_work);
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
/* tx reclaim */
bcmgenet_tx_reclaim_all(dev);
bcmgenet_fini_dma(priv);
@@ -3519,7 +3560,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
- unsigned char *addr,
+ const unsigned char *addr,
int *i)
{
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
@@ -3592,7 +3633,7 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -3950,6 +3991,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
+ /* Set default pause parameters */
+ priv->autoneg_pause = 1;
+ priv->tx_pause = 1;
+ priv->rx_pause = 1;
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
dev->watchdog_timeo = 2 * HZ;
@@ -4036,11 +4082,15 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
if (pd && !IS_ERR_OR_NULL(pd->mac_address))
- ether_addr_copy(dev->dev_addr, pd->mac_address);
+ eth_hw_addr_set(dev, pd->mac_address);
else
- if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
- if (has_acpi_companion(&pdev->dev))
- bcmgenet_get_hw_addr(priv, dev->dev_addr);
+ if (device_get_ethdev_address(&pdev->dev, dev))
+ if (has_acpi_companion(&pdev->dev)) {
+ u8 addr[ETH_ALEN];
+
+ bcmgenet_get_hw_addr(priv, addr);
+ eth_hw_addr_set(dev, addr);
+ }
if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
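Note: with the new get/set_pauseparam hooks, GENET flow control becomes runtime-configurable, e.g. `ethtool -A eth0 autoneg on rx on tx on`. The resolution logic in bcmgenet_mac_config() is worth restating: the UMAC uses "ignore" bits, so pause is disabled by setting a bit, and the manual rx/tx settings still apply on top of a negotiated result. A sketch mirroring that logic, using the driver's own fields and register bits:

static u32 example_pause_bits(struct bcmgenet_priv *priv,
			      struct phy_device *phydev)
{
	u32 cmd_bits = 0;

	if (priv->autoneg_pause) {
		bool tx_pause = false, rx_pause = false;

		if (phydev->autoneg)
			phy_get_pause(phydev, &tx_pause, &rx_pause);
		if (!tx_pause)
			cmd_bits |= CMD_TX_PAUSE_IGNORE;
		if (!rx_pause)
			cmd_bits |= CMD_RX_PAUSE_IGNORE;
	}

	/* Manual override is applied unconditionally on top. */
	if (!priv->rx_pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE;
	if (!priv->tx_pause)
		cmd_bits |= CMD_TX_PAUSE_IGNORE;

	return cmd_bits;
}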
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0a6d91b0f0aa..1cc2838e52c6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -594,6 +594,9 @@ struct bcmgenet_priv {
/* other misc variables */
struct bcmgenet_hw_params *hw_params;
+ unsigned autoneg_pause:1;
+ unsigned tx_pause:1;
+ unsigned rx_pause:1;
/* MDIO bus variables */
wait_queue_head_t wq;
@@ -606,10 +609,6 @@ struct bcmgenet_priv {
bool clk_eee_enabled;
/* PHY device variables */
- int old_link;
- int old_speed;
- int old_duplex;
- int old_pause;
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
@@ -690,6 +689,7 @@ int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 89d16c587bb7..ad56f54eda0a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -25,92 +25,80 @@
#include "bcmgenet.h"
-/* setup netdev link state when PHY link status change and
- * update UMAC and RGMII block when link up
- */
-void bcmgenet_mii_setup(struct net_device *dev)
+static void bcmgenet_mac_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
- bool status_changed = false;
- if (priv->old_link != phydev->link) {
- status_changed = true;
- priv->old_link = phydev->link;
- }
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = CMD_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = CMD_SPEED_100;
+ else
+ cmd_bits = CMD_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
- if (phydev->link) {
- /* check speed/duplex/pause changes */
- if (priv->old_speed != phydev->speed) {
- status_changed = true;
- priv->old_speed = phydev->speed;
- }
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL) {
+ cmd_bits |= CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+ } else {
+ /* pause capability defaults to Symmetric */
+ if (priv->autoneg_pause) {
+ bool tx_pause = 0, rx_pause = 0;
- if (priv->old_duplex != phydev->duplex) {
- status_changed = true;
- priv->old_duplex = phydev->duplex;
- }
+ if (phydev->autoneg)
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
- if (priv->old_pause != phydev->pause) {
- status_changed = true;
- priv->old_pause = phydev->pause;
+ if (!tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ if (!rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
}
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* speed */
- if (phydev->speed == SPEED_1000)
- cmd_bits = CMD_SPEED_1000;
- else if (phydev->speed == SPEED_100)
- cmd_bits = CMD_SPEED_100;
- else
- cmd_bits = CMD_SPEED_10;
- cmd_bits <<= CMD_SPEED_SHIFT;
-
- /* duplex */
- if (phydev->duplex != DUPLEX_FULL)
- cmd_bits |= CMD_HD_EN;
-
- /* pause capability */
- if (!phydev->pause)
- cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
-
- /*
- * Program UMAC and RGMII block based on established
- * link speed, duplex, and pause. The speed set in
- * umac->cmd tell RGMII block which clock to use for
- * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
- * Receive clock is provided by the PHY.
- */
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
- reg |= RGMII_LINK;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ /* Manual override */
+ if (!priv->rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
+ if (!priv->tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ }
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
- CMD_HD_EN |
- CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
- reg |= cmd_bits;
- if (reg & CMD_SW_RESET) {
- reg &= ~CMD_SW_RESET;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- udelay(2);
- reg |= CMD_TX_EN | CMD_RX_EN;
- }
+ /* Program UMAC and RGMII block based on established
+ * link speed, duplex, and pause. The speed set in
+ * umac->cmd tell RGMII block which clock to use for
+ * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
+ * Receive clock is provided by the PHY.
+ */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ if (reg & CMD_SW_RESET) {
+ reg &= ~CMD_SW_RESET;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- } else {
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* needed for MoCA fixed PHY to reflect correct link status */
- netif_carrier_off(dev);
+ udelay(2);
+ reg |= CMD_TX_EN | CMD_RX_EN;
}
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+}
+/* setup netdev link state when PHY link status change and
+ * update UMAC and RGMII block when link up
+ */
+void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
phy_print_status(phydev);
}
@@ -130,6 +118,21 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
return 0;
}
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising,
+ rx | tx);
+ phy_start_aneg(phydev);
+
+ mutex_lock(&phydev->lock);
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
+ mutex_unlock(&phydev->lock);
+}
+
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -286,23 +289,53 @@ int bcmgenet_mii_probe(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct device_node *dn = kdev->of_node;
+ phy_interface_t phy_iface = priv->phy_interface;
struct phy_device *phydev;
- u32 phy_flags = 0;
+ u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
int ret;
/* Communicate the integrated PHY revision */
if (priv->internal_phy)
phy_flags = priv->gphy_rev;
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
+ /* This is an ugly quirk but we have not been correctly interpreting
+ * the phy_interface values and we have done that across different
+ * drivers, so at least we are consistent in our mistakes.
+ *
+ * When the Generic PHY driver is in use either the PHY has been
+ * strapped or programmed correctly by the boot loader so we should
+ * stick to our incorrect interpretation since we have validated it.
+ *
+ * Now when a dedicated PHY driver is in use, we need to reverse the
+ * meaning of the phy_interface_mode values to something that the PHY
+ * driver will interpret and act on such that we have two mistakes
+ * canceling themselves so to speak. We only do this for the two
+ * modes that GENET driver officially supports on Broadcom STB chips:
+ * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. Other
+ * modes are not *officially* supported with the boot loader and the
+ * scripted environment generating Device Tree blobs for those
+ * platforms.
+ *
+ * Note that internal PHY, MoCA and fixed-link configurations are not
+ * affected because they use different phy_interface_t values or the
+ * Generic PHY driver.
+ */
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ default:
+ break;
+ }
if (dn) {
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
+ phy_flags, phy_iface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
@@ -332,7 +365,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
- priv->phy_interface);
+ phy_iface);
if (ret) {
pr_err("could not attach to PHY\n");
return -ENODEV;
@@ -350,8 +383,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- linkmode_copy(phydev->advertising, phydev->supported);
-
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
* that prevents the signaling of link UP interrupts when
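Note: the interface-mode remapping quirk above can be summarized as a pure function; as the long in-diff comment explains, the values handed to dedicated PHY drivers are deliberately complemented so that two historical misinterpretations cancel out. A sketch, isolated from the driver for clarity:

static phy_interface_t genet_iface_quirk_sketch(phy_interface_t iface)
{
	switch (iface) {
	case PHY_INTERFACE_MODE_RGMII:
		return PHY_INTERFACE_MODE_RGMII_ID;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		return PHY_INTERFACE_MODE_RGMII_RXID;
	default:
		return iface;	/* internal PHY, MoCA, fixed-link: untouched */
	}
}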
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5e0e0e70d801..b1328c5524b5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3942,7 +3942,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
}
/* tp->lock is held. */
-static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
+ int index)
{
u32 addr_high, addr_low;
@@ -5746,7 +5747,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
udelay(40);
- current_link_up = false;
tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
@@ -9366,7 +9366,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev))
return 0;
@@ -10273,8 +10273,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- if (tg3_flag(tp, TSO_CAPABLE) &&
- tg3_asic_rev(tp) == ASIC_REV_5705) {
+ if (tg3_flag(tp, TSO_CAPABLE)) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -11213,12 +11212,8 @@ static void tg3_reset_task(struct work_struct *work)
}
tg3_netif_start(tp);
-
tg3_full_unlock(tp);
-
- if (!err)
- tg3_phy_start(tp);
-
+ tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
rtnl_unlock();
@@ -16915,19 +16910,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-static int tg3_get_device_address(struct tg3 *tp)
+static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
{
- struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
int err;
- if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
return 0;
if (tg3_flag(tp, IS_SSB_CORE)) {
- err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
- if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ err = ssb_gige_get_macaddr(tp->pdev, addr);
+ if (!err && is_valid_ether_addr(addr))
return 0;
}
@@ -16951,41 +16945,41 @@ static int tg3_get_device_address(struct tg3 *tp)
/* First try to get it from MAC address mailbox. */
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
if ((hi >> 16) == 0x484b) {
- dev->dev_addr[0] = (hi >> 8) & 0xff;
- dev->dev_addr[1] = (hi >> 0) & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
+ addr[1] = (hi >> 0) & 0xff;
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[5] = (lo >> 0) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[5] = (lo >> 0) & 0xff;
/* Some old bootcode may report a 0 MAC address in SRAM */
- addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+ addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
- memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+ memcpy(&addr[0], ((char *)&hi) + 2, 2);
+ memcpy(&addr[2], (char *)&lo, sizeof(lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
hi = tr32(MAC_ADDR_0_HIGH);
lo = tr32(MAC_ADDR_0_LOW);
- dev->dev_addr[5] = lo & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[1] = hi & 0xff;
- dev->dev_addr[0] = (hi >> 8) & 0xff;
+ addr[5] = lo & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[1] = hi & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0]))
+ if (!is_valid_ether_addr(addr))
return -EINVAL;
return 0;
}
@@ -17561,6 +17555,7 @@ static int tg3_init_one(struct pci_dev *pdev,
char str[40];
u64 dma_mask, persist_dma_mask;
netdev_features_t features = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
err = pci_enable_device(pdev);
if (err) {
@@ -17783,12 +17778,13 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->rx_pending = 63;
}
- err = tg3_get_device_address(tp);
+ err = tg3_get_device_address(tp, addr);
if (err) {
dev_err(&pdev->dev,
"Could not obtain valid ethernet address, aborting\n");
goto err_out_apeunmap;
}
+ eth_hw_addr_set(dev, addr);
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
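Note: tg3_get_device_address() now fills a caller-provided buffer, and the caller declares it `u8 addr[ETH_ALEN] __aligned(2)` because the etherdevice.h helpers (ether_addr_copy(), ether_addr_equal(), ...) are documented to require 16-bit-aligned addresses. A hypothetical distillation of the pattern:

#include <linux/etherdevice.h>

static int example_commit_addr(struct net_device *dev,
			       int (*read_addr)(u8 *addr))
{
	u8 addr[ETH_ALEN] __aligned(2);	/* alignment required by eth helpers */

	if (read_addr(addr) || !is_valid_ether_addr(addr))
		return -EINVAL;		/* no usable MAC from any source */

	eth_hw_addr_set(dev, addr);
	return 0;
}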
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ba47777d9cff..bbdc829c3524 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -875,7 +875,7 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
- ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
+ eth_hw_addr_set(netdev, bnad->perm_addr);
}
/* Control Path Handlers */
@@ -3249,7 +3249,7 @@ bnad_set_mac_address(struct net_device *netdev, void *addr)
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
if (!err)
- ether_addr_copy(netdev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(netdev, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3515,7 +3515,6 @@ static void
bnad_uninit(struct bnad *bnad)
{
if (bnad->work_q) {
- flush_workqueue(bnad->work_q);
destroy_workqueue(bnad->work_q);
bnad->work_q = NULL;
}
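Note: the dropped flush_workqueue() calls in this series (bnad above, liquidio below) rely on destroy_workqueue() draining all pending and in-flight work itself, so flushing first is redundant. A teardown sketch:

#include <linux/workqueue.h>

static void example_teardown(struct workqueue_struct *wq)
{
	/* destroy_workqueue() internally drains the queue; no separate
	 * flush_workqueue(wq) is needed before it.
	 */
	destroy_workqueue(wq);
}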
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d8d87213697c..5620b97b3482 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -243,9 +243,11 @@
#define MACB_NCR_TPF_SIZE 1
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
-#define MACB_SRTSM_OFFSET 15
-#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
+#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */
+#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
+#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */
+#define MACB_MIIONRGMII_SIZE 1
/* Bitfields in NCFGR */
#define MACB_SPD_OFFSET 0 /* Speed */
@@ -713,6 +715,7 @@
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_MIIONRGMII 0x00000200
#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d13fb1d31821..029dea2873e3 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -313,7 +313,7 @@ static void macb_get_hwaddr(struct macb *bp)
addr[5] = (top >> 8) & 0xff;
if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
return;
}
}
@@ -547,13 +547,8 @@ static void macb_validate(struct phylink_config *config,
if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
(state->interface == PHY_INTERFACE_MODE_NA ||
state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseT_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
goto out;
}
@@ -684,6 +679,9 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
ctrl |= GEM_BIT(PCSSEL);
ncr |= GEM_BIT(ENABLE_HS_MAC);
+ } else if (bp->caps & MACB_CAPS_MIIONRGMII &&
+ bp->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ncr |= MACB_BIT(MIIONRGMII);
}
}
@@ -4594,7 +4592,8 @@ static const struct macb_config zynq_config = {
};
static const struct macb_config sama7g5_gem_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_MIIONRGMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4602,7 +4601,8 @@ static const struct macb_config sama7g5_gem_config = {
};
static const struct macb_config sama7g5_emac_config = {
- .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4774,7 +4774,7 @@ static int macb_probe(struct platform_device *pdev)
if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
- err = of_get_mac_address(np, bp->dev->dev_addr);
+ err = of_get_ethdev_address(np, bp->dev);
if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
else if (err)
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index c2e1f163bb14..095c5a2144a7 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -38,7 +38,8 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
return NULL;
}
-static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
unsigned long flags;
@@ -46,7 +47,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
u32 secl, sech;
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ ptp_read_system_prets(sts);
first = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
second = gem_readl(bp, TN);
@@ -56,7 +59,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
/* if so, use later read & re-read seconds
* (assume all done within 1s)
*/
+ ptp_read_system_prets(sts);
ts->tv_nsec = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
} else {
@@ -161,7 +166,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
if (delta > TSU_NSEC_MAX_VAL) {
- gem_tsu_get_time(&bp->ptp_clock_info, &now);
+ gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL);
now = timespec64_add(now, then);
gem_tsu_set_time(&bp->ptp_clock_info,
@@ -192,7 +197,7 @@ static const struct ptp_clock_info gem_ptp_caps_template = {
.pps = 1,
.adjfine = gem_ptp_adjfine,
.adjtime = gem_ptp_adjtime,
- .gettime64 = gem_tsu_get_time,
+ .gettimex64 = gem_tsu_get_time,
.settime64 = gem_tsu_set_time,
.enable = gem_ptp_enable,
};
@@ -251,7 +256,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
* The timestamp only contains lower few bits of seconds,
* so add value from 1588 timer
*/
- gem_tsu_get_time(&bp->ptp_clock_info, &tsu);
+ gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);
/* If the top bit is set in the timestamp,
* but not in 1588 timer, it has rolled over,
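Note: switching macb from .gettime64 to .gettimex64 lets the PTP core hand in a struct ptp_system_timestamp; the driver brackets exactly the register read that latches the device time, so tools like phc2sys can bound the readout latency. The pre/post helpers accept a NULL sts, which is why internal callers simply pass NULL. A sketch, with the example_read_*() accessors as hypothetical stand-ins for gem_readl():

#include <linux/ptp_clock_kernel.h>

static long example_read_nsec(struct ptp_clock_info *ptp)
{
	return 0;	/* hypothetical: read latches the full timestamp */
}

static time64_t example_read_sec(struct ptp_clock_info *ptp)
{
	return 0;	/* hypothetical: read the latched seconds */
}

static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	ptp_read_system_prets(sts);		/* system clock before */
	ts->tv_nsec = example_read_nsec(ptp);	/* the latching read */
	ptp_read_system_postts(sts);		/* system clock after */
	ts->tv_sec = example_read_sec(ptp);
	return 0;
}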
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b6a066404f4b..457cb7121000 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -607,7 +607,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr)
writel(value, ioaddr + XGMAC_CONTROL);
}
-static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr,
int num)
{
u32 data;
@@ -1479,7 +1479,7 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
@@ -1693,6 +1693,7 @@ static int xgmac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct xgmac_priv *priv = NULL;
+ u8 addr[ETH_ALEN];
u32 uid;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1785,7 +1786,8 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->max_mtu = XGMAC_MAX_MTU;
/* Get the MAC address */
- xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ xgmac_get_mac_addr(priv->base, addr, 0);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr))
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 2a0d64e5797c..73cb03266549 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -411,7 +411,7 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
if (!ether_addr_equal(netdev->dev_addr, mac)) {
macaddr_changed = true;
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
}
@@ -490,7 +490,6 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
wq = &lio->rxq_status_wq[q_no];
if (wq->wq) {
cancel_delayed_work_sync(&wq->wk.work);
- flush_workqueue(wq->wq);
destroy_workqueue(wq->wq);
wq->wq = NULL;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 2907e13b9df6..1daf63e437ce 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1279,6 +1279,14 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
struct lio *lio;
dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ device_lock(&oct->pci_dev->dev);
+ if (oct->devlink) {
+ devlink_unregister(oct->devlink);
+ devlink_free(oct->devlink);
+ oct->devlink = NULL;
+ }
+ device_unlock(&oct->pci_dev->dev);
+
if (!oct->ifcount) {
dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
return 1;
@@ -1300,12 +1308,6 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
for (i = 0; i < oct->ifcount; i++)
liquidio_destroy_nic_device(oct, i);
- if (oct->devlink) {
- devlink_unregister(oct->devlink);
- devlink_free(oct->devlink);
- oct->devlink = NULL;
- }
-
dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
return 0;
}
@@ -2022,7 +2024,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EIO;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
return 0;
@@ -3632,7 +3634,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
/* By default all interfaces on a single Octeon uses the same
* tx and rx queues
@@ -3749,10 +3751,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
}
}
+ device_lock(&octeon_dev->pci_dev->dev);
devlink = devlink_alloc(&liquidio_devlink_ops,
sizeof(struct lio_devlink_priv),
&octeon_dev->pci_dev->dev);
if (!devlink) {
+ device_unlock(&octeon_dev->pci_dev->dev);
dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
goto setup_nic_dev_free;
}
@@ -3760,15 +3764,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio_devlink = devlink_priv(devlink);
lio_devlink->oct = octeon_dev;
- if (devlink_register(devlink)) {
- devlink_free(devlink);
- dev_err(&octeon_dev->pci_dev->dev,
- "devlink registration failed\n");
- goto setup_nic_dev_free;
- }
-
octeon_dev->devlink = devlink;
octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_register(devlink);
+ device_unlock(&octeon_dev->pci_dev->dev);
return 0;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index f6396ac64006..c607756b731f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1168,7 +1168,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EPERM;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
return 0;
@@ -2148,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
if (liquidio_setup_io_queues(octeon_dev, i,
lio->linfo.num_txpciq,
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 30463a6d1f8c..4e39d712e121 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1501,7 +1501,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
- result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ result = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (result)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 0fbecd093fa1..f2f1ce81fd9c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1311,9 +1311,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
pci_set_drvdata(pdev, NULL);
- return err;
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
err = pci_request_regions(pdev, DRV_NAME);
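Note: dev_err_probe() folds the error message and the return into one statement, stays silent for -EPROBE_DEFER, and records the deferral reason so it can be inspected later. A minimal sketch with a hypothetical enable step:

#include <linux/device.h>

static int example_probe_step(struct device *dev, int err)
{
	if (err)
		return dev_err_probe(dev, err, "failed to enable device\n");
	return 0;
}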
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a27227aeae88..bb45d5df2856 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -221,8 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending)
- ether_addr_copy(nic->netdev->dev_addr,
- mbx.nic_cfg.mac_addr);
+ eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr);
nic->sqs_mode = mbx.nic_cfg.sqs_mode;
nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false;
@@ -1612,7 +1611,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
if (nic->pdev->msix_enabled) {
if (nicvf_hw_set_mac_addr(nic, netdev))
@@ -2119,10 +2118,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index c36fed9c3d73..574a32f23f96 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1387,10 +1387,10 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
u8 *dst)
{
u8 mac[ETH_ALEN];
- u8 *addr;
+ int ret;
- addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
- if (!addr) {
+ ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
+ if (ret) {
dev_err(dev, "MAC address invalid: %pM\n", mac);
return -EINVAL;
}
@@ -1597,9 +1597,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pcim_enable_device(pdev);
if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
pci_set_drvdata(pdev, NULL);
- return err;
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
err = pci_request_regions(pdev, DRV_NAME);
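Note: fwnode_get_mac_address() changed calling convention in this series: it now returns an errno and fills a caller buffer that is implicitly ETH_ALEN bytes, where the old form returned a pointer and took an explicit length. A sketch of the new-style lookup, assuming that signature:

#include <linux/etherdevice.h>
#include <linux/property.h>

static int example_fwnode_mac(struct fwnode_handle *fwnode, u8 *dst)
{
	u8 mac[ETH_ALEN];
	int ret = fwnode_get_mac_address(fwnode, mac);

	if (ret)
		return ret;	/* property missing or not a valid MAC */

	ether_addr_copy(dst, mac);
	return 0;
}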
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d246eee4b6d5..609820e214a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -853,7 +853,7 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
if (!mac->ops->macaddress_set)
return -EOPNOTSUPP;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
mac->ops->macaddress_set(mac, dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index dfa77491a910..5913eaf442b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -117,7 +117,7 @@ struct cmac_ops {
const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
- int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+ int (*macaddress_set)(struct cmac *, const u8 mac_addr[6]);
};
typedef struct _cmac_instance cmac_instance;
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index c27908e66f5e..0bb37e4680c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -496,7 +496,7 @@ static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
return 0;
}
-static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+static int pm3393_macaddress_set(struct cmac *cmac, const u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 310add28fcf5..007c591b8bf5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1140,7 +1140,7 @@ int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
adapter->port[i].dev->name);
goto error;
}
- memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i].dev, hw_addr);
init_link_config(&adapter->port[i].link_config, bi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index 873c1c7b4ca0..2ad3efb550c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -379,7 +379,7 @@ static int mac_intr_clear(struct cmac *mac)
}
/* Expect MAC address to be in network byte order. */
-static int mac_set_address(struct cmac* mac, u8 addr[6])
+static int mac_set_address(struct cmac* mac, const u8 addr[6])
{
u32 val;
int port = mac->instance->index;
@@ -591,7 +591,7 @@ static void port_stats_update(struct cmac *mac)
} hw_stats[] = {
#define HW_STAT(reg, stat_name) \
- { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+ { reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) }
/* Rx stats */
HW_STAT(RxUnicast, RxUnicastFramesOK),
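
[editor's note] The old HW_STAT macro derived a counter's word index by subtracting two pointers built from a NULL base, which is undefined behaviour in C and trips UBSAN. offsetof() expresses the same index legitimately, assuming every counter in the struct is a u64. A stand-in with the same shape:

    #include <linux/stddef.h>
    #include <linux/types.h>

    /* demo_stats mirrors the layout of struct cmac_statistics */
    struct demo_stats {
            u64 RxOctetsOK;          /* word index 0 */
            u64 RxUnicastFramesOK;   /* word index 1 */
    };

    /* old (UB):  (&((struct demo_stats *)NULL)->m) - (u64 *)NULL
     * new (OK):  byte offset of member / sizeof(u64)            */
    #define DEMO_STAT_IDX(m) (offsetof(struct demo_stats, m) / sizeof(u64))
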
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index b706f2fbe4f4..a309016f7f8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -710,7 +710,7 @@ int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 38e47703f9ab..9cf9e33664e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2586,7 +2586,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 7ff31d1026fb..53feac8da503 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
@@ -3758,8 +3759,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr,
- ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i], hw_addr);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
index 3af19a550372..1bdc6cad1e49 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
@@ -240,7 +240,7 @@ static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
}
/* Set one of the station's unicast MAC addresses. */
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6])
{
if (idx >= mac->nucast)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecea3cdd30b3..5657ac8cfca0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1545,7 +1545,7 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
u8 hw_addr[])
{
- ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ eth_hw_addr_set(adapter->port[port_idx], hw_addr);
ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d9cda4ab303..dde1cf51d0ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3468,7 +3468,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 64144b6171d7..e7b4e3ed056c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9706,7 +9706,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
if (ret)
return ret;
- memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(adap->port[i], addr);
j++;
}
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index f55105a4112f..03cb1410d6fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -40,6 +40,7 @@
#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__
+#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -507,7 +508,7 @@ static inline const char *port_name(struct adapter *adapter, int pidx)
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
- memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[pidx], hw_addr);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 49b76fd47daa..64479c464b4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1218,7 +1218,7 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -2902,10 +2902,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Initialize generic PCI device state.
*/
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* Reserve PCI resources for the device. If we can't get them some
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index bcad69c48074..4af5561cbfc5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- percpu_counter_inc((child)->sk_prot->orphan_count);
+ INC_ORPHAN_COUNT(child);
chtls_release_resources(child);
chtls_conn_done(child);
} else {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index b1161bdeda4d..f61ca657601c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -95,7 +95,7 @@ struct deferred_skb_cb {
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
+#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
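
[editor's note] The chtls hunks track a core change in this series: sk_prot->orphan_count is no longer a struct percpu_counter but a bare per-CPU unsigned int, so the increment becomes a single lockless this_cpu_inc() instead of a call that can take a spinlock once the batch threshold is crossed. A minimal sketch of the bare per-CPU idiom; the demo_* names are illustrative:

    #include <linux/percpu-defs.h>

    static DEFINE_PER_CPU(unsigned int, demo_orphan_count);

    static inline void demo_orphan_inc(void)
    {
            /* lockless per-CPU increment; a reader sums all CPUs' slots */
            this_cpu_inc(demo_orphan_count);
    }
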
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d0c4c8b7a15a..4a97aa8e1387 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1227,7 +1227,7 @@ static int set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
dev->name, dev->dev_addr);
@@ -1314,6 +1314,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
int tmp;
unsigned rev_type = 0;
int eeprom_buff[CHKSUM_LEN];
+ u8 addr[ETH_ALEN];
int retval;
/* Initialize the device structure. */
@@ -1387,9 +1388,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
for (i = 0; i < ETH_ALEN / 2; i++) {
unsigned int Addr;
Addr = readreg(dev, PP_IA + i * 2);
- dev->dev_addr[i * 2] = Addr & 0xFF;
- dev->dev_addr[i * 2 + 1] = Addr >> 8;
+ addr[i * 2] = Addr & 0xFF;
+ addr[i * 2 + 1] = Addr >> 8;
}
+ eth_hw_addr_set(dev, addr);
/* Load the Adapter Configuration.
* Note: Barring any more specific information from some
@@ -1464,9 +1466,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
/* eeprom_buff has 32-bit ints, so we can't just memcpy it */
/* store the initial memory base address */
for (i = 0; i < ETH_ALEN / 2; i++) {
- dev->dev_addr[i * 2] = eeprom_buff[i];
- dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
+ addr[i * 2] = eeprom_buff[i];
+ addr[i * 2 + 1] = eeprom_buff[i] >> 8;
}
+ eth_hw_addr_set(dev, addr);
cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
dev->name, lp->adapter_cnf);
}
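
[editor's note] Drivers that assemble the MAC byte-by-byte from hardware now stage the bytes in a stack buffer and commit them with one eth_hw_addr_set() call, since indexed stores into dev->dev_addr are going away. A sketch of the cs89x0-style conversion, with demo_readreg() standing in for the driver's real register accessor:

    #include <linux/etherdevice.h>

    u16 demo_readreg(struct net_device *dev, int idx);  /* hypothetical accessor */

    static void demo_read_mac(struct net_device *dev)
    {
            u8 addr[ETH_ALEN];
            int i;

            for (i = 0; i < ETH_ALEN / 2; i++) {
                    u16 word = demo_readreg(dev, i);

                    addr[i * 2] = word & 0xff;      /* low byte first, as cs89x0 does */
                    addr[i * 2 + 1] = word >> 8;
            }
            eth_hw_addr_set(dev, addr);             /* single const-safe commit */
    }
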
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 072fac5f5d24..21ba6e893072 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -746,7 +746,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
if (dev == NULL)
return NULL;
- memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, data->dev_addr);
dev->ethtool_ops = &ep93xx_ethtool_ops;
dev->netdev_ops = &ep93xx_netdev_ops;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 6324e80960c3..84251b85fc93 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -541,7 +541,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr);
/* set the Ethernet address */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 12ffc14fbecd..6ded4d9fa32a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -139,7 +139,7 @@ static void enic_get_drvinfo(struct net_device *netdev,
int err;
err = enic_dev_fw_info(enic, &fw_info);
- /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
+ /* return only when dma_alloc_coherent fails in vnic_dev_fw_info
* For other failures, like devcmd failure, we return previously
* recorded info.
*/
@@ -270,7 +270,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &vstats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d0a8f7106958..aacf141986d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -882,7 +882,7 @@ static void enic_get_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &stats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
@@ -985,7 +985,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return -EADDRNOTAVAIL;
}
- memcpy(netdev->dev_addr, addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr);
return 0;
}
@@ -1098,6 +1098,7 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
static int enic_set_vf_port(struct net_device *netdev, int vf,
struct nlattr *port[])
{
+ static const u8 zero_addr[ETH_ALEN] = {};
struct enic *enic = netdev_priv(netdev);
struct enic_port_profile prev_pp;
struct enic_port_profile *pp;
@@ -1162,7 +1163,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
} else {
memset(pp, 0, sizeof(*pp));
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
} else {
/* Set flag to indicate that the port assoc/disassoc
@@ -1174,7 +1175,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (pp->request == PORT_REQUEST_DISASSOCIATE) {
eth_zero_addr(pp->mac_addr);
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index e6a83198c3dd..80f46dbd5117 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -73,9 +73,9 @@ static int enic_set_port_profile(struct enic *enic, int vf)
struct vic_provinfo *vp;
const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ const u8 *client_mac;
char uuid_str[38];
char client_mac_str[18];
- u8 *client_mac;
int err;
ENIC_PP_BY_INDEX(enic, vf, pp, &err);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6e745ca4c433..941f175fb911 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1889,7 +1889,7 @@ static int gmac_set_mac_address(struct net_device *netdev, void *addr)
{
struct sockaddr *sa = addr;
- memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, sa->sa_data);
gmac_write_mac_address(netdev);
return 0;
@@ -2467,13 +2467,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
DEFAULT_NAPI_WEIGHT);
if (is_valid_ether_addr((void *)port->mac_addr)) {
- memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
} else {
dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- eth_random_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
}
gmac_write_mac_address(netdev);
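
[editor's note] gemini also swaps eth_random_addr(netdev->dev_addr) for eth_hw_addr_random(netdev), which both generates a valid locally administered address and sets addr_assign_type to NET_ADDR_RANDOM so userspace can tell the address is not permanent. The fallback idiom, sketched with an illustrative helper name:

    #include <linux/etherdevice.h>

    static void demo_init_mac(struct net_device *dev, const u8 *candidate)
    {
            if (is_valid_ether_addr(candidate))
                    eth_hw_addr_set(dev, candidate);
            else
                    eth_hw_addr_random(dev); /* random locally administered MAC */
    }
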
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index e842de6f6635..0985ab216566 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1425,6 +1425,7 @@ dm9000_probe(struct platform_device *pdev)
enum of_gpio_flags flags;
struct regulator *power;
bool inv_mac_addr = false;
+ u8 addr[ETH_ALEN];
power = devm_regulator_get(dev, "vcc");
if (IS_ERR(power)) {
@@ -1666,11 +1667,12 @@ dm9000_probe(struct platform_device *pdev)
/* try reading the node address from the attached EEPROM */
for (i = 0; i < 6; i += 2)
- dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
+ dm9000_read_eeprom(db, i / 2, addr + i);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
- memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, pdata->dev_addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -1678,7 +1680,8 @@ dm9000_probe(struct platform_device *pdev)
mac_src = "chip";
for (i = 0; i < 6; i++)
- ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
+ addr[i] = ior(db, i + DM9000_PAR);
+		eth_hw_addr_set(ndev, addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 117c26fa5909..d51b3d24a0c8 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -666,8 +666,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -685,7 +685,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -695,7 +695,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -710,7 +710,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1713,6 +1713,7 @@ static const struct ethtool_ops de_ethtool_ops = {
static void de21040_get_mac_address(struct de_private *de)
{
+ u8 addr[ETH_ALEN];
unsigned i;
dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
@@ -1724,12 +1725,13 @@ static void de21040_get_mac_address(struct de_private *de)
value = dr32(ROMCmd);
rmb();
} while (value < 0 && --boguscnt > 0);
- de->dev->dev_addr[i] = value;
+ addr[i] = value;
udelay(1);
if (boguscnt <= 0)
pr_warn("timeout reading 21040 MAC address byte %u\n",
i);
}
+ eth_hw_addr_set(de->dev, addr);
}
static void de21040_get_media_info(struct de_private *de)
@@ -1821,8 +1823,7 @@ static void de21041_get_srom_info(struct de_private *de)
#endif
/* store MAC address */
- for (i = 0; i < 6; i ++)
- de->dev->dev_addr[i] = ee_data[i + sa_offset];
+ eth_hw_addr_set(de->dev, &ee_data[sa_offset]);
/* get offset of controller 0 info leaf. ignore 2nd byte. */
ofs = ee_data[SROMC0InfoLeaf];
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 36ab4cbf2ad0..13121c4dcfe6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4031,6 +4031,7 @@ get_hw_addr(struct net_device *dev)
int broken, i, k, tmp, status = 0;
u_short j,chksum;
struct de4x5_private *lp = netdev_priv(dev);
+ u8 addr[ETH_ALEN];
broken = de4x5_bad_srom(lp);
@@ -4042,28 +4043,30 @@ get_hw_addr(struct net_device *dev)
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_char) tmp;
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_short) (tmp << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
} else if (!broken) {
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
} else if ((broken == SMC) || (broken == ACCTON)) {
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
}
} else {
k += (u_char) (tmp = inb(EISA_APROM));
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
}
if (k > 0xffff) k-=0xffff;
}
if (k == 0xffff) k=0;
+ eth_hw_addr_set(dev, addr);
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4095,8 +4098,9 @@ get_hw_addr(struct net_device *dev)
int x = dev->dev_addr[i];
x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
}
+ eth_hw_addr_set(dev, addr);
}
#endif /* CONFIG_PPC_PMAC */
@@ -4158,12 +4162,9 @@ test_bad_enet(struct net_device *dev, int status)
if ((tmp == 0) || (tmp == 0x5fa)) {
if ((lp->chipset == last.chipset) &&
(lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
- for (i=ETH_ALEN-1; i>2; --i) {
- dev->dev_addr[i] += 1;
- if (dev->dev_addr[i] != 0) break;
- }
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ eth_addr_inc(last.addr);
+ eth_hw_addr_set(dev, last.addr);
+
if (!an_exception(lp)) {
dev->irq = last.irq;
}
@@ -5391,9 +5392,7 @@ de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data
if (netif_queue_stopped(dev))
return -EBUSY;
netif_stop_queue(dev);
- for (i=0; i<ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp.addr[i];
- }
+ eth_hw_addr_set(dev, tmp.addr);
build_setup_frame(dev, PHYS_ADDR_ONLY);
/* Set up the descriptor and give ownership to the card */
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
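
[editor's note] The de4x5 hunk replaces an open-coded ripple-carry increment of the station address with eth_addr_inc(), which goes through a u64. One subtle difference: the old loop stopped the carry before byte 2, at the OUI boundary, while eth_addr_inc() lets it ripple through all six bytes; the results differ only when the low three bytes wrap. Roughly what the helper does:

    #include <linux/etherdevice.h>

    static void demo_next_mac(u8 addr[ETH_ALEN])
    {
            u64 u = ether_addr_to_u64(addr);

            u++;                            /* carries across all six bytes */
            u64_to_ether_addr(u, addr);
    }
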
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c763b692e164..83f1727d1423 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -476,8 +476,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Set Node address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ eth_hw_addr_set(dev, &db->srom[20]);
err = register_netdev (dev);
if (err)
@@ -1436,9 +1435,9 @@ static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
static void dm9132_id_table(struct net_device *dev)
{
+ const u16 *addrptr = (const u16 *)dev->dev_addr;
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr + 0xc0;
- u16 *addrptr = (u16 *)dev->dev_addr;
struct netdev_hw_addr *ha;
u16 i, hash_table[4];
@@ -1477,7 +1476,7 @@ static void send_filter_frame(struct net_device *dev)
struct dmfe_board_info *db = netdev_priv(dev);
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1487,7 +1486,7 @@ static void send_filter_frame(struct net_device *dev)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fcedd733bacb..79df5a72877b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -339,7 +339,7 @@ static void tulip_up(struct net_device *dev)
}
} else {
/* This is set_rx_mode(), but without starting the transmitter. */
- u16 *eaddrs = (u16 *)dev->dev_addr;
+ const u16 *eaddrs = (const u16 *)dev->dev_addr;
u16 *setup_frm = &tp->setup_frame[15*6];
dma_addr_t mapping;
@@ -1001,8 +1001,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -1019,7 +1019,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1029,7 +1029,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -1044,7 +1044,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1305,6 +1305,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
const char *chip_name = tulip_tbl[chip_idx].chip_name;
unsigned int eeprom_missing = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
unsigned int force_csr0 = 0;
board_idx++;
@@ -1506,13 +1507,15 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
do {
value = ioread32(ioaddr + CSR9);
} while (value < 0 && --boguscnt > 0);
- put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
+ put_unaligned_le16(value, ((__le16 *)addr) + i);
sum += value & 0xffff;
}
+ eth_hw_addr_set(dev, addr);
} else if (chip_idx == COMET) {
/* No need to read the EEPROM. */
- put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
- put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
+ put_unaligned_le32(ioread32(ioaddr + 0xA4), addr);
+ put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4);
+ eth_hw_addr_set(dev, addr);
for (i = 0; i < 6; i ++)
sum += dev->dev_addr[i];
} else {
@@ -1575,20 +1578,23 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
for (i = 0; i < 6; i ++) {
- dev->dev_addr[i] = ee_data[i + sa_offset];
+ addr[i] = ee_data[i + sa_offset];
sum += ee_data[i + sa_offset];
}
+ eth_hw_addr_set(dev, addr);
}
/* Lite-On boards have the address byte-swapped. */
if ((dev->dev_addr[0] == 0xA0 ||
dev->dev_addr[0] == 0xC0 ||
dev->dev_addr[0] == 0x02) &&
- dev->dev_addr[1] == 0x00)
+ dev->dev_addr[1] == 0x00) {
for (i = 0; i < 6; i+=2) {
- char tmp = dev->dev_addr[i];
- dev->dev_addr[i] = dev->dev_addr[i+1];
- dev->dev_addr[i+1] = tmp;
+ addr[i] = dev->dev_addr[i+1];
+ addr[i+1] = dev->dev_addr[i];
}
+ eth_hw_addr_set(dev, addr);
+ }
+
/* On the Zynx 315 Etherarray and other multiport boards only the
first Tulip has an EEPROM.
On Sparc systems the mac address is held in the OBP property
@@ -1599,17 +1605,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sum == 0 || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
+ const unsigned char *addr2;
int len;
#endif
eeprom_missing = 1;
for (i = 0; i < 5; i++)
- dev->dev_addr[i] = last_phys_addr[i];
- dev->dev_addr[i] = last_phys_addr[i] + 1;
+ addr[i] = last_phys_addr[i];
+ addr[i] = last_phys_addr[i] + 1;
+ eth_hw_addr_set(dev, addr);
#if defined(CONFIG_SPARC)
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN)
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ addr2 = of_get_property(dp, "local-mac-address", &len);
+ if (addr2 && len == ETH_ALEN)
+ eth_hw_addr_set(dev, addr2);
#endif
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index d67ef7d02d6b..77d9058431e3 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -272,6 +272,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
struct uli526x_board_info *db; /* board information structure */
struct net_device *dev;
void __iomem *ioaddr;
+ u8 addr[ETH_ALEN];
int i, err;
ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -379,7 +380,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
uw32(DCR13, 0x1b0); //Select ID Table access port
//Read MAC address from CR14
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ur32(DCR14);
+ addr[i] = ur32(DCR14);
//Read end
uw32(DCR13, 0); //Clear CR13
uw32(DCR0, 0); //Clear CR0
@@ -388,8 +389,10 @@ static int uli526x_init_one(struct pci_dev *pdev,
else /*Exist SROM*/
{
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ addr[i] = db->srom[20 + i];
}
+ eth_hw_addr_set(dev, addr);
+
err = register_netdev (dev);
if (err)
goto err_out_unmap;
@@ -1343,7 +1346,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
void __iomem *ioaddr = db->ioaddr;
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1353,7 +1356,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 85b99099c6b9..86b1d23eba83 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -355,6 +355,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int irq;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ __le16 addr[ETH_ALEN / 2];
void __iomem *ioaddr;
i = pcim_enable_device(pdev);
@@ -382,7 +383,8 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_netdev;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ addr[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ eth_hw_addr_set(dev, (u8 *)addr);
/* Reset the chip to erase previous misconfiguration.
No hold time required! */
@@ -877,7 +879,7 @@ static void init_registers(struct net_device *dev)
8000 16 longwords 0200 2 longwords 2000 32 longwords
C000 32 longwords 0400 4 longwords */
-#if defined (__i386__) && !defined(MODULE)
+#if defined (__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index a8de79355578..8759f9f76b62 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -1015,12 +1015,14 @@ static void read_mac_address(struct xircom_private *card)
xw32(CSR10, i + 3);
data_count = xr32(CSR9);
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
+ u8 addr[ETH_ALEN];
int j;
for (j = 0; j < 6; j++) {
xw32(CSR10, i + j + 4);
- card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
+ addr[j] = xr32(CSR9) & 0xff;
}
+ eth_hw_addr_set(card->dev, addr);
break;
} else if (link == 0) {
break;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 202ecb132053..a301f7e6a440 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -349,8 +349,7 @@ parse_eeprom (struct net_device *dev)
}
/* Set MAC address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = psrom->mac_addr[i];
+ eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
np->led_mode = psrom->led_mode;
@@ -567,7 +566,7 @@ static void rio_hw_init(struct net_device *dev)
*/
for (i = 0; i < 3; i++)
dw16(StationAddr0 + 2 * i,
- cpu_to_le16(((u16 *)dev->dev_addr)[i]));
+ cpu_to_le16(((const u16 *)dev->dev_addr)[i]));
set_multicast (dev);
if (np->coalesce) {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c36d186dffed..c710dc17be90 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -508,6 +508,7 @@ static int sundance_probe1(struct pci_dev *pdev,
int bar = 1;
#endif
int phy, phy_end, phy_idx = 0;
+ __le16 addr[ETH_ALEN / 2];
if (pci_enable_device(pdev))
return -EIO;
@@ -528,8 +529,9 @@ static int sundance_probe1(struct pci_dev *pdev,
goto err_out_res;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] =
+ addr[i] =
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ eth_hw_addr_set(dev, (u8 *)addr);
np = netdev_priv(dev);
np->ndev = dev;
@@ -1611,7 +1613,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
__set_mac_addr(dev);
return 0;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 6c51cf991dad..92462ed87bc4 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -60,11 +60,11 @@ static void __dnet_set_hwaddr(struct dnet *bp)
{
u16 tmp;
- tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
+ tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr);
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}
@@ -93,7 +93,7 @@ static void dnet_get_hwaddr(struct dnet *bp)
*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
if (is_valid_ether_addr(addr))
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
}
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index b2d4fb3feb74..46e3a05e9582 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -479,6 +479,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct net_device *net_dev;
struct ec_bhf_priv *priv;
void __iomem *dma_io;
+ u8 addr[ETH_ALEN];
void __iomem *io;
int err = 0;
@@ -539,7 +540,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err < 0)
goto err_free_net_dev;
- memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+ memcpy_fromio(addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);
+ eth_hw_addr_set(net_dev, addr);
err = register_netdev(net_dev);
if (err < 0)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 649c5c429bd7..528eb0f223b1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1080,7 +1080,7 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index c30d6d6f0f3a..db1f3b908582 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2385,7 +2385,7 @@ int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id,
u32 *pmac_id, u32 domain);
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 361c1c87c183..d51f24c9e1b8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -272,7 +272,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
-static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
{
int i;
@@ -369,7 +369,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
@@ -4599,7 +4599,7 @@ static int be_mac_setup(struct be_adapter *adapter)
if (status)
return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(adapter->netdev, mac);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
/* Initial MAC for BE3 VFs is already programmed by PF */
@@ -4621,7 +4621,6 @@ static void be_destroy_err_recovery_workq(void)
if (!be_err_recovery_workq)
return;
- flush_workqueue(be_err_recovery_workq);
destroy_workqueue(be_err_recovery_workq);
be_err_recovery_workq = NULL;
}
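
[editor's note] Dropping flush_workqueue() before destroy_workqueue() is safe because destroy_workqueue() already drains the queue until no work items remain (and warns if new work keeps arriving during teardown), so the minimal teardown reduces to:

    #include <linux/workqueue.h>

    static void demo_destroy_wq(struct workqueue_struct **wq)
    {
            if (!*wq)
                    return;
            destroy_workqueue(*wq);         /* implies a final drain */
            *wq = NULL;
    }
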
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ed1ed48e7483..b1c8ffea6ad2 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -707,20 +707,16 @@ static int ethoc_mdio_probe(struct net_device *dev)
else
phy = phy_find_first(priv->mdio);
- if (!phy) {
- dev_err(&dev->dev, "no PHY found\n");
- return -ENXIO;
- }
+ if (!phy)
+ return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n");
priv->old_duplex = -1;
priv->old_link = -1;
err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
PHY_INTERFACE_MODE_GMII);
- if (err) {
- dev_err(&dev->dev, "could not attach to PHY\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->dev, err, "could not attach to PHY\n");
phy_set_max_speed(phy, SPEED_100);
@@ -806,8 +802,8 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ethoc_do_set_mac_address(struct net_device *dev)
{
+ const unsigned char *mac = dev->dev_addr;
struct ethoc *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
@@ -820,7 +816,7 @@ static int ethoc_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
ethoc_do_set_mac_address(dev);
return 0;
}
@@ -1148,18 +1144,22 @@ static int ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdata) {
- ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
+ eth_hw_addr_set(netdev, pdata->hwaddr);
priv->phy_id = pdata->phy_id;
} else {
- of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ of_get_ethdev_address(pdev->dev.of_node, netdev);
priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the
* current MAC from the controller.
*/
- if (!is_valid_ether_addr(netdev->dev_addr))
- ethoc_get_mac_address(netdev, netdev->dev_addr);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ u8 addr[ETH_ALEN];
+
+ ethoc_get_mac_address(netdev, addr);
+ eth_hw_addr_set(netdev, addr);
+ }
/* Check the MAC again for validity, if it still isn't choose and
* program a random one.
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
index 38aa824efb25..9241b9b1c7a3 100644
--- a/drivers/net/ethernet/ezchip/Kconfig
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_EZCHIP
config EZCHIP_NPS_MANAGEMENT_ENET
tristate "EZchip NPS management enet support"
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on HAS_IOMEM
help
Simple LAN device for debug or management purposes.
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f9a288a6ec8c..323340826dab 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -421,7 +421,7 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
res = eth_mac_addr(ndev, p);
if (!res) {
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
nps_enet_set_hw_mac_address(ndev);
}
@@ -601,7 +601,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
/* set kernel MAC address to dev */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
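
[editor's note] of_get_mac_address(np, buf) fills a caller-supplied buffer, which would again require writing netdev->dev_addr directly; of_get_ethdev_address(np, ndev) is the variant that stores the DT-provided address through the const-safe setter and returns 0 on success. The probe-time idiom used by the ethoc and nps_enet hunks:

    #include <linux/etherdevice.h>
    #include <linux/of_net.h>

    static void demo_probe_mac(struct device_node *np, struct net_device *ndev)
    {
            /* DT "mac-address"/"local-mac-address" first, random fallback */
            if (of_get_ethdev_address(np, ndev))
                    eth_hw_addr_random(ndev);
    }
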
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index ff76e401a014..97c5d70de76e 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -182,13 +182,10 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
u8 mac[ETH_ALEN];
unsigned int m;
unsigned int l;
- void *addr;
- addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
- if (addr) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
dev_info(priv->dev, "Read MAC address %pM from device tree\n",
- mac);
+ priv->netdev->dev_addr);
return;
}
@@ -203,7 +200,7 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
mac[5] = l & 0xff;
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
} else {
eth_hw_addr_random(priv->netdev);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 25c91b3c5fd3..b3939a5f7b03 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -482,6 +482,7 @@ static int fealnx_init_one(struct pci_dev *pdev,
struct net_device *dev;
void *ring_space;
dma_addr_t ring_dma;
+ u8 addr[ETH_ALEN];
#ifdef USE_IO_OPS
int bar = 0;
#else
@@ -525,7 +526,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
/* read ethernet id */
for (i = 0; i < 6; ++i)
- dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
+ addr[i] = ioread8(ioaddr + PAR0 + i);
+ eth_hw_addr_set(dev, addr);
/* Reset the chip to erase previous misconfiguration. */
iowrite32(0x00000001, ioaddr + BCR);
@@ -827,7 +829,7 @@ static int netdev_open(struct net_device *dev)
return -EAGAIN;
for (i = 0; i < 3; i++)
- iowrite16(((unsigned short*)dev->dev_addr)[i],
+ iowrite16(((const unsigned short *)dev->dev_addr)[i],
ioaddr + PAR0 + i*2);
init_ring(dev);
@@ -857,7 +859,7 @@ static int netdev_open(struct net_device *dev)
np->bcrvalue |= 0x04; /* big-endian */
#endif
-#if defined(__i386__) && !defined(MODULE)
+#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
if (boot_cpu_data.x86 <= 4)
np->crvalue = 0xa00;
else
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 685d2d8a3b36..6b2927d863e2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -268,11 +268,11 @@ static int dpaa_netdev_init(struct net_device *net_dev,
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
return -EINVAL;
@@ -452,7 +452,7 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
mac_dev = priv->mac_dev;
err = mac_dev->change_addr(mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err < 0) {
netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
err);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 605a39f892b9..7fefe1574b6a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -189,12 +189,11 @@ static const struct devlink_ops dpaa2_eth_devlink_ops = {
.trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
};
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_devlink_priv *dl_priv;
- int err;
priv->devlink =
devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev);
@@ -204,25 +203,23 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
}
dl_priv = devlink_priv(priv->devlink);
dl_priv->dpaa2_priv = priv;
-
- err = devlink_register(priv->devlink);
- if (err) {
- dev_err(dev, "devlink_register() = %d\n", err);
- goto devlink_free;
- }
-
return 0;
+}
-devlink_free:
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv)
+{
devlink_free(priv->devlink);
+}
- return err;
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
+ devlink_register(priv->devlink);
}
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
{
devlink_unregister(priv->devlink);
- devlink_free(priv->devlink);
}
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
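
[editor's note] The devlink rework splits the old register step (allocation plus devlink_register() in one call) into alloc/free and register/unregister pairs. devlink_register() no longer returns an error here, and the instance is meant to be registered last in probe, after everything it exposes is ready, then unregistered first in remove, mirroring that order. A sketch of the resulting probe/remove shape, using the names from the hunk with error handling elided:

    static int demo_probe(struct dpaa2_eth_priv *priv)
    {
            int err;

            err = dpaa2_eth_dl_alloc(priv);  /* devlink_alloc() only */
            if (err)
                    return err;

            /* ... register netdev, traps, ports ... */

            dpaa2_eth_dl_register(priv);     /* last step, cannot fail */
            return 0;
    }

    static void demo_remove(struct dpaa2_eth_priv *priv)
    {
            dpaa2_eth_dl_unregister(priv);   /* first step */
            /* ... tear down ports, traps, netdev ... */
            dpaa2_eth_dl_free(priv);         /* devlink_free() last */
    }
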
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7065c71ed7b8..714e961e7a77 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -533,6 +533,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
list_add_tail(&skb->list, ch->rx_list);
@@ -641,6 +642,7 @@ static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
fq->stats.frames += cleaned;
ch->stats.frames += cleaned;
+ ch->stats.frames_per_cdan += cleaned;
/* A dequeue operation only pulls frames from a single queue
* into the store. Return the frame queue as an out param.
@@ -1264,7 +1266,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch __always_unused,
+ struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
struct dpaa2_eth_fq *fq)
{
@@ -1279,6 +1281,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
percpu_extras->tx_conf_bytes += fd_len;
+ ch->stats.bytes_per_cdan += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -1601,6 +1604,12 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
}
} while (store_cleaned);
+ /* Update NET DIM with the values for this CDAN */
+ dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
+ ch->stats.bytes_per_cdan);
+ ch->stats.frames_per_cdan = 0;
+ ch->stats.bytes_per_cdan = 0;
+
/* We didn't consume the entire budget, so finish napi and
* re-enable data availability notifications
*/
@@ -4013,7 +4022,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
return err;
}
}
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else if (is_zero_ether_addr(dpni_mac_addr)) {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
@@ -4038,7 +4047,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
/* NET_ADDR_PERM is default, all we have to do is
* fill in the device addr.
*/
- memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, dpni_mac_addr);
}
return 0;
@@ -4431,7 +4440,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_connect_mac;
- err = dpaa2_eth_dl_register(priv);
+ err = dpaa2_eth_dl_alloc(priv);
if (err)
goto err_dl_register;
@@ -4453,6 +4462,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_dbg_add(priv);
#endif
+ dpaa2_eth_dl_register(priv);
dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;
@@ -4461,7 +4471,7 @@ err_netdev_reg:
err_dl_port_add:
dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
err_dl_register:
dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
@@ -4508,6 +4518,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
+ dpaa2_eth_dl_unregister(priv);
+
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
@@ -4519,7 +4531,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index cdb623d5f2c1..2085844227fe 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -384,6 +384,8 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_redirect;
/* Must be last, does not show up in ethtool stats */
__u64 frames;
+ __u64 frames_per_cdan;
+ __u64 bytes_per_cdan;
};
/* Maximum number of queues associated with a DPNI */
@@ -725,7 +727,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
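
[editor's note] The new frames_per_cdan/bytes_per_cdan counters feed NET DIM (dynamic interrupt moderation): at the end of each NAPI poll the channel reports how many frames and bytes the last data-availability notification (CDAN) produced, and the DIM algorithm adjusts the DPIO's interrupt coalescing from that rate. dpaa2_io_update_net_dim() is the dpio-side wrapper; underneath, the generic flow looks roughly like this (a sketch of the include/linux/dim.h API, not the dpio code itself):

    #include <linux/dim.h>

    static void demo_report_to_dim(struct dim *dim, u16 events,
                                   u64 frames, u64 bytes)
    {
            struct dim_sample sample = {};

            /* snapshot cumulative totals plus a timestamp for this event */
            dim_update_sample(events, frames, bytes, &sample);
            /* may schedule dim->work to pick a new moderation profile */
            net_dim(dim, sample);
    }
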
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 2da5f881f630..adb8ce5306ee 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -820,7 +820,63 @@ static int dpaa2_eth_set_tunable(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio = priv->channel[0]->dpio;
+
+ dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
+ ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio;
+ int prev_adaptive;
+ u32 prev_rx_usecs;
+ int i, j, err;
+
+ /* Keep track of the previous value, just in case we fail */
+ dpio = priv->channel[0]->dpio;
+ dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
+ prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ /* Setup new value for rx coalescing */
+ for (i = 0; i < priv->num_channels; i++) {
+ dpio = priv->channel[i]->dpio;
+
+ dpaa2_io_set_adaptive_coalescing(dpio,
+ ic->use_adaptive_rx_coalesce);
+ err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
+ if (err)
+ goto restore_rx_usecs;
+ }
+
+ return 0;
+
+restore_rx_usecs:
+ for (j = 0; j < i; j++) {
+ dpio = priv->channel[j]->dpio;
+
+ dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
+ dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
+ }
+
+ return err;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = dpaa2_eth_get_drvinfo,
.nway_reset = dpaa2_eth_nway_reset,
.get_link = ethtool_op_get_link,
@@ -836,4 +892,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
+ .get_coalesce = dpaa2_eth_get_coalesce,
+ .set_coalesce = dpaa2_eth_set_coalesce,
};
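
[editor's note] Because one DPNI spreads its queues over several DPIO channels, set_coalesce has to apply the new rx-usecs and adaptive-rx settings to every channel; if any dpaa2_io_set_irq_coalescing() call rejects the value, the loop unwinds by restoring the previously read settings on the channels already touched, so ethtool never leaves the device half-configured. The rollback shape in isolation, as a generic sketch with hypothetical names:

    #include <linux/types.h>

    struct demo_ch;                                      /* hypothetical channel */
    int demo_set_usecs(struct demo_ch *ch, u32 usecs);   /* hypothetical setter */

    static int demo_apply_all(struct demo_ch **ch, int n, u32 usecs, u32 prev)
    {
            int i, j, err;

            for (i = 0; i < n; i++) {
                    err = demo_set_usecs(ch[i], usecs);
                    if (err)
                            goto rollback;
            }
            return 0;

    rollback:
            /* undo only the channels already updated, in order */
            for (j = 0; j < i; j++)
                    demo_set_usecs(ch[j], prev);
            return err;
    }
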
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ae6d382d8735..ef8f0a055024 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -139,7 +139,7 @@ static void dpaa2_mac_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
- phylink_set(mask, 10000baseT_Full);
+ phylink_set_10g_modes(mask);
if (state->interface == PHY_INTERFACE_MODE_10GBASER)
break;
phylink_set(mask, 5000baseT_Full);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 175f15c46842..d039457928b0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -980,7 +980,7 @@ static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
/* First check if firmware has any address configured by bootloader */
if (!is_zero_ether_addr(mac_addr)) {
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 042327b9981f..504e12554079 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -7,7 +7,9 @@
#include <linux/udp.h>
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
+#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
+#include <net/tso.h>
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
@@ -314,12 +316,261 @@ dma_err:
return 0;
}
+static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
+{
+ union enetc_tx_bd txbd_tmp;
+ u8 flags = 0, e_flags = 0;
+ dma_addr_t addr;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ if (skb_vlan_tag_present(skb))
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
+ txbd_tmp.flags = flags;
+
+ /* For the TSO header we do not set the dma address since we do not
+ * want it unmapped when we do cleanup. We still set len so that we
+ * count the bytes sent.
+ */
+ tx_swbd->len = hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Add extension BD for VLAN */
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Setup the VLAN fields */
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = 0; /* < C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ *txbd = txbd_tmp;
+ }
+}
+
+static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, char *data,
+ int size, bool last_bd)
+{
+ union enetc_tx_bd txbd_tmp;
+ dma_addr_t addr;
+ u8 flags = 0;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+
+ addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -ENOMEM;
+ }
+
+ if (last_bd) {
+ flags |= ENETC_TXBD_FLAGS_F;
+ tx_swbd->is_eof = 1;
+ }
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(size);
+ txbd_tmp.flags = flags;
+
+ tx_swbd->dma = addr;
+ tx_swbd->len = size;
+ tx_swbd->dir = DMA_TO_DEVICE;
+
+ *txbd = txbd_tmp;
+
+ return 0;
+}
+
+static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
+ char *hdr, int hdr_len, int *l4_hdr_len)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ int mac_hdr_len = skb_network_offset(skb);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = 0;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = 0;
+ }
+
+ /* Compute the IP checksum. This is necessary since tso_build_hdr()
+ * already incremented the IP ID field.
+ */
+ if (!tso->ipv6) {
+ struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ /* Compute the checksum over the L4 header. */
+ *l4_hdr_len = hdr_len - skb_transport_offset(skb);
+ return csum_partial(l4_hdr, *l4_hdr_len, 0);
+}
+
+static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
+ struct sk_buff *skb, char *hdr, int len,
+ __wsum sum)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ __sum16 csum_final;
+
+ /* Complete the L4 checksum by appending the pseudo-header to the
+ * already computed checksum.
+ */
+ if (!tso->ipv6)
+ csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ len, ip_hdr(skb)->protocol, sum);
+ else
+ csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ len, ipv6_hdr(skb)->nexthdr, sum);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = csum_final;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = csum_final;
+ }
+}
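Taken together, the two helpers split one ones'-complement sum across a TSO segment: enetc_tso_hdr_csum() seeds it over the (zeroed-checksum) L4 header, the caller folds in each payload chunk at its running offset, and enetc_tso_complete_csum() appends the pseudo-header and folds the result. A minimal sketch of that composition for a single TCP/IPv4 segment (illustrative only, not part of this patch):

	static __sum16 example_tso_seg_csum(struct sk_buff *skb, char *l4_hdr,
					    int l4_len, char *data, int data_len)
	{
		/* sum over the L4 header; tcph->check was zeroed first */
		__wsum sum = csum_partial(l4_hdr, l4_len, 0);

		/* fold in the payload at its byte offset in the segment;
		 * the offset only matters when the bytes summed so far
		 * end on an odd boundary
		 */
		sum = csum_block_add(sum, csum_partial(data, data_len, 0), l4_len);

		/* append the IPv4 pseudo-header, fold down to 16 bits */
		return csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
					 l4_len + data_len, IPPROTO_TCP, sum);
	}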
+
+static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ int hdr_len, total_len, data_len;
+ struct enetc_tx_swbd *tx_swbd;
+ union enetc_tx_bd *txbd;
+ struct tso_t tso;
+ __wsum csum, csum2;
+ int count = 0, pos;
+ int err, i, bd_data_num;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ i = tx_ring->next_to_use;
+
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Get the BD */
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Determine the length of this packet */
+ data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_len;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
+
+ /* compute the csum over the L4 header */
+ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
+ enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ bd_data_num = 0;
+ count++;
+
+ while (data_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_len);
+
+ /* Advance the index in the BDR */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Compute the checksum over this segment of data and
+ * add it to the csum already computed (over the L4
+ * header and possible other data segments).
+ */
+ csum2 = csum_partial(tso.data, size, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ pos += size;
+
+ err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
+ tso.data, size,
+ size == data_len);
+ if (err)
+ goto err_map_data;
+
+ data_len -= size;
+ count++;
+ bd_data_num++;
+ tso_build_data(skb, &tso, size);
+
+ if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ goto err_chained_bd;
+ }
+
+ enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
+
+ if (total_len == 0)
+ tx_swbd->skb = skb;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+err_map_data:
+	dev_err(tx_ring->dev, "DMA map error\n");
+
+err_chained_bd:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
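enetc_map_tx_tso_buffs() follows the generic software-TSO contract from <net/tso.h>: tso_start() parses the headers and initializes the payload cursor, tso_build_hdr() emits a per-segment header (patching length, sequence number and IP ID, and flagging the last segment), and tso_build_data() advances the cursor across the skb frags. Stripped of the BD and checksum bookkeeping, the loop reduces to this sketch (hdr_buf is a hypothetical per-segment header buffer):

	struct tso_t tso;
	int hdr_len = tso_start(skb, &tso);
	int left = skb->len - hdr_len;

	while (left > 0) {
		int seg = min_t(int, skb_shinfo(skb)->gso_size, left);

		left -= seg;
		tso_build_hdr(skb, hdr_buf, &tso, seg, left == 0);
		while (seg > 0) {
			int sz = min_t(int, tso.size, seg);

			/* hand tso.data / sz to the hardware here */
			seg -= sz;
			tso_build_data(skb, &tso, sz);	/* advance cursor */
		}
	}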
+
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count;
+ int count, err;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -332,19 +583,35 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
- if (unlikely(skb_linearize(skb)))
- goto drop_packet_err;
+ if (skb_is_gso(skb)) {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ } else {
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ err = skb_checksum_help(skb);
+ if (err)
+ goto drop_packet_err;
+ }
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
if (unlikely(!count))
goto drop_packet_err;
@@ -546,10 +813,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
bool is_eof = tx_swbd->is_eof;
if (unlikely(tx_swbd->check_wb)) {
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- union enetc_tx_bd *txbd;
-
- txbd = ENETC_TXBD(*tx_ring, i);
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
if (txbd->flags & ENETC_TXBD_FLAGS_W &&
tx_swbd->do_twostep_tstamp) {
@@ -567,8 +831,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(tx_swbd->skb->cb[0] &
- ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet, and send one skb from the
* tx_skbs queue if there is one.
@@ -1493,15 +1756,32 @@ static int enetc_alloc_txbdr(struct enetc_bdr *txr)
return -ENOMEM;
err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
- if (err) {
- vfree(txr->tx_swbd);
- return err;
+ if (err)
+ goto err_alloc_bdr;
+
+ txr->tso_headers = dma_alloc_coherent(txr->dev,
+ txr->bd_count * TSO_HEADER_SIZE,
+ &txr->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txr->tso_headers) {
+ err = -ENOMEM;
+ goto err_alloc_tso;
}
txr->next_to_clean = 0;
txr->next_to_use = 0;
return 0;
+
+err_alloc_tso:
+ dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
+ txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+err_alloc_bdr:
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+
+ return err;
}
static void enetc_free_txbdr(struct enetc_bdr *txr)
@@ -1513,6 +1793,10 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
size = txr->bd_count * sizeof(union enetc_tx_bd);
+ dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
+ txr->tso_headers, txr->tso_headers_dma);
+ txr->tso_headers = NULL;
+
dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
txr->bd_base = NULL;
@@ -2607,10 +2891,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
pcie_flr(pdev);
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
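dev_err_probe() collapses the log-and-return idiom seen in the removed lines: it logs the message with the error code appended, stays quiet for -EPROBE_DEFER (recording the deferral reason for debugfs instead), and returns the error. A roughly equivalent open-coded form, for illustration:

	err = pci_enable_device_mem(pdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "error %pe: device enable failed\n",
				ERR_PTR(err));
		return err;	/* dev_err_probe() also records deferrals */
	}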
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 08b283347d9c..fb39e406b7fc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -112,6 +112,10 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
bool ext_en; /* enable h/w descriptor extensions */
+
+ /* DMA buffer for TSO headers */
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
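The two new fields are the CPU and device views of one coherent region, sliced into one TSO_HEADER_SIZE slot (256 bytes, per <net/tso.h>) per BD, so enetc.c can address the same slot from either side with the ring index; roughly:

	char *hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;	/* CPU view */
	dma_addr_t addr = tx_ring->tso_headers_dma + i * TSO_HEADER_SIZE; /* device view */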
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 0f5f081a5baf..1514e6a4a3ff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -635,10 +635,14 @@ struct enetc_cmd_rfse {
#define ENETC_RFSE_EN BIT(15)
#define ENETC_RFSE_MODE_BD 2
-static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw,
+ struct net_device *ndev)
{
+ u8 addr[ETH_ALEN] __aligned(4);
+
*(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
*(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+ eth_hw_addr_set(ndev, addr);
}
#define ENETC_SI_INT_IDX 0
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index d522bd5c90b4..64f92770691f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -40,7 +40,7 @@ static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, saddr->sa_data);
enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
return 0;
@@ -762,10 +762,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
@@ -782,7 +786,7 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
}
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
@@ -806,10 +810,8 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
err = of_mdiobus_register(bus, np);
- if (err) {
- dev_err(dev, "cannot register MDIO bus\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
pf->mdio = bus;
@@ -1218,10 +1220,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
ERR_PTR(err));
err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
- if (err) {
- dev_err(&pdev->dev, "PCI probing failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
if (!si->hw.port || !si->hw.global) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index bc594892507a..36b4f51dd297 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -39,10 +39,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
}
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 4577226d3c6a..0536d2c76fbc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -486,14 +486,16 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
data_size = sizeof(struct streamid_data);
si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!si_data)
+ return -ENOMEM;
cbd.length = cpu_to_le16(data_size);
dma = dma_map_single(&priv->si->pdev->dev, si_data,
data_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(si_data);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
@@ -512,12 +514,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- return -EINVAL;
+ goto out;
- if (!enable) {
- kfree(si_data);
- return 0;
- }
+ if (!enable)
+ goto out;
/* Enable the entry overwrite again in case the space was flushed by hardware */
memset(&cbd, 0, sizeof(cbd));
@@ -560,6 +560,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
}
err = enetc_send_cmd(priv->si, &cbd);
+out:
+ if (!dma_mapping_error(&priv->si->pdev->dev, dma))
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
+
kfree(si_data);
return err;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 1a9d1e8b772c..17924305afa2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -122,16 +122,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_vf_probe(struct pci_dev *pdev,
@@ -143,10 +147,8 @@ static int enetc_vf_probe(struct pci_dev *pdev,
int err;
err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
- if (err) {
- dev_err(&pdev->dev, "PCI probing failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ec87b370bba1..bc418b910999 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1768,11 +1768,8 @@ static int fec_get_mac(struct net_device *ndev)
return 0;
}
- memcpy(ndev->dev_addr, iap, ETH_ALEN);
-
/* Adjust MAC if using macaddr */
- if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+ eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
return 0;
}
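eth_hw_addr_gen() replaces the open-coded last-byte bump; note that it adds the id into the address as a 64-bit value, so a carry ripples across octets instead of wrapping the final one. A sketch of the helper's behavior, assuming the etherdevice.h conversion helpers:

	u64 u = ether_addr_to_u64(base);	/* 6-byte MAC as a u64 */
	u8 addr[ETH_ALEN];

	u += id;				/* carry may propagate */
	u64_to_ether_addr(u, addr);
	eth_hw_addr_set(ndev, addr);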
@@ -3326,7 +3323,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (addr) {
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
}
/* Add netif status check here to avoid system hang in below case:
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 73ff359a15f1..bbbde9f701c2 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -112,7 +112,7 @@ static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
- memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sock->sa_data);
mpc52xx_fec_set_paddr(dev, sock->sa_data);
return 0;
@@ -890,7 +890,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
*
* First try to read MAC address from DT
*/
- rv = of_get_mac_address(np, ndev->dev_addr);
+ rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index bce3c9398887..1950a8936bc0 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -366,7 +366,7 @@ static void set_dflts(struct dtsec_cfg *cfg)
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}
-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
{
u32 tmp;
@@ -516,7 +516,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
if (addr) {
MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
- set_mac_address(regs, (u8 *)eth_addr);
+ set_mac_address(regs, (const u8 *)eth_addr);
}
/* HASH */
@@ -1022,7 +1022,7 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
enum comm_mode mode = COMM_MODE_NONE;
@@ -1041,7 +1041,7 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
* Station address has to be swapped (big endian to little endian)
*/
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
- set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+ set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
graceful_start(dtsec, mode);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 5149d96ec2c1..68512c3bd6e5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -37,7 +37,7 @@
struct fman_mac *dtsec_config(struct fman_mac_params *params);
int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
int dtsec_adjust_link(struct fman_mac *dtsec,
u16 speed);
int dtsec_restart_autoneg(struct fman_mac *dtsec);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 62f42921933d..2216b7f51d26 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -354,7 +354,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
u8 paddr_num)
{
u32 tmp0, tmp1;
@@ -897,12 +897,12 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
- add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
return 0;
}
@@ -1058,7 +1058,7 @@ int memac_init(struct fman_mac *memac)
/* MAC Address */
if (memac->addr != 0) {
MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
}
fixed_link = memac_drv_param->fixed_link;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b2c671ec0ce7..3820f7a22983 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -40,7 +40,7 @@
struct fman_mac *memac_config(struct fman_mac_params *params);
int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
int memac_adjust_link(struct fman_mac *memac, u16 speed);
int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 41946b16f6c7..311c1906e044 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -221,7 +221,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct tgec_regs __iomem *regs, const u8 *adr)
{
u32 tmp0, tmp1;
@@ -514,13 +514,13 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
- set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
+ set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
return 0;
}
@@ -704,7 +704,7 @@ int tgec_init(struct fman_mac *tgec)
if (tgec->addr) {
MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
- set_mac_address(tgec->regs, (u8 *)eth_addr);
+ set_mac_address(tgec->regs, (const u8 *)eth_addr);
}
/* interrupts */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index 3bfd1062b386..b28b20b26148 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -37,7 +37,7 @@
struct fman_mac *tgec_config(struct fman_mac_params *params);
int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 824a81a9f350..daa285a9b8b2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -66,7 +66,7 @@ struct mac_device {
int (*stop)(struct mac_device *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
- int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2db6e38a772e..bacf25318f87 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1005,7 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ of_get_ethdev_address(ofdev->dev.of_node, ndev);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index af6ad94bf24a..acab58fd3db3 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -753,7 +753,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (stash_len || stash_idx)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
- err = of_get_mac_address(np, dev->dev_addr);
+ err = of_get_ethdev_address(np, dev);
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3eb288d10b0c..823221c912ab 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3205,7 +3205,7 @@ static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/*
* If device is not running, we will set mac addr register
@@ -3731,7 +3731,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
goto err_free_netdev;
}
- of_get_mac_address(np, dev->dev_addr);
+ of_get_ethdev_address(np, dev);
ugeth->ug_info = ug_info;
ugeth->dev = device;
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 62c0bed82ced..b0d733e9a7c6 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -334,6 +334,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
u8 *buf;
size_t len;
u_char buggybuf[32];
+ u8 addr[ETH_ALEN];
dev_dbg(&link->dev, "fmvj18x_config\n");
@@ -468,8 +469,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
goto failed;
}
/* Read MACID from CIS */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = buf[i + 5];
+ eth_hw_addr_set(dev, &buf[5]);
kfree(buf);
} else {
if (pcmcia_get_mac_from_cis(link, dev))
@@ -490,7 +490,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
case UNGERMANN:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "Access/CARD";
break;
case XXX10304:
@@ -499,16 +500,15 @@ static int fmvj18x_config(struct pcmcia_device *link)
pr_notice("unable to read hardware net address\n");
goto failed;
}
- for (i = 0 ; i < 6; i++) {
- dev->dev_addr[i] = buggybuf[i];
- }
+ eth_hw_addr_set(dev, buggybuf);
card_name = "FMV-J182";
break;
case MBH10302:
default:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ addr[i] = inb(ioaddr + MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "FMV-J181";
break;
}
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 92dc18a4bcc4..51ed8fe71d2d 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -30,7 +30,7 @@
#define GVE_MIN_MSIX 3
/* Numbers of gve tx/rx stats in stats report. */
-#define GVE_TX_STATS_REPORT_NUM 5
+#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2
/* Interval to schedule a stats report update, 20000ms. */
@@ -224,11 +224,6 @@ struct gve_tx_iovec {
u32 iov_padding; /* padding associated with this segment */
};
-struct gve_tx_dma_buf {
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
* ring entry but only used for a pkt_desc not a seg_desc
*/
@@ -236,7 +231,10 @@ struct gve_tx_buffer_state {
struct sk_buff *skb; /* skb for this pkt */
union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
- struct gve_tx_dma_buf buf;
+ struct {
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ };
};
};
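Folding the DMA state straight into the owning struct lets the dma_unmap_*() accessors take the buffer state itself, and the DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() fields still compile away when the config does not need unmap state. Hypothetical usage against the anonymous struct above:

	struct gve_tx_buffer_state *info = &tx->info[idx];

	dma_unmap_addr_set(info, dma, addr);
	dma_unmap_len_set(info, len, len);
	/* ... later, on completion or on error: */
	dma_unmap_single(dev, dma_unmap_addr(info, dma),
			 dma_unmap_len(info, len), DMA_TO_DEVICE);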
@@ -280,7 +278,8 @@ struct gve_tx_pending_packet_dqo {
* All others correspond to `skb`'s frags and should be unmapped with
* `dma_unmap_page`.
*/
- struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+ DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+ DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
u16 num_bufs;
/* Linked list index to next element in the list, or -1 if none */
@@ -342,8 +341,8 @@ struct gve_tx_ring {
union {
/* GQI fields */
struct {
- /* NIC tail pointer */
- __be32 last_nic_done;
+ /* Spinlock for when cleanup in progress */
+ spinlock_t clean_lock;
};
/* DQO fields. */
@@ -414,7 +413,9 @@ struct gve_tx_ring {
u32 q_num ____cacheline_aligned; /* queue idx */
u32 stop_queue; /* count of queue stops */
u32 wake_queue; /* count of queue wakes */
+ u32 queue_timeout; /* count of queue timeouts */
u32 ntfy_id; /* notification block index */
+ u32 last_kick_msec; /* Last time the queue was kicked */
dma_addr_t bus; /* dma address of the descr ring */
dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
@@ -822,15 +823,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx);
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx);
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
-bool gve_rx_poll(struct gve_notify_block *block, int budget);
+int gve_rx_poll(struct gve_notify_block *block, int budget);
+bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index f089d33dd48e..af2c1d1535f5 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -733,7 +733,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
}
priv->dev->max_mtu = mtu;
priv->num_event_counters = be16_to_cpu(descriptor->counters);
- ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
+ eth_hw_addr_set(priv->dev, descriptor->mac);
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 47c3d8f313fc..3953f6f7a427 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -270,6 +270,7 @@ enum gve_stat_names {
TX_LAST_COMPLETION_PROCESSED = 5,
RX_NEXT_EXPECTED_SEQUENCE = 6,
RX_BUFFERS_POSTED = 7,
+ TX_TIMEOUT_CNT = 8,
// stats from NIC
RX_QUEUE_DROP_CNT = 65,
RX_NO_BUFFERS_POSTED = 66,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 716e6240305d..618a3e1d858e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -330,8 +330,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
- data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
- tx));
+ data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
if (skip_nic_stats) {
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index bf8a4a7c43f7..7647cd05b1d2 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -24,6 +24,9 @@
#define GVE_VERSION "1.0.0"
#define GVE_VERSION_PREFIX "GVE-"
+// Minimum amount of time between queue kicks in msec (10 seconds)
+#define MIN_TX_TIMEOUT_GAP (1000 * 10)
+
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
@@ -192,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
__be32 __iomem *irq_doorbell;
bool reschedule = false;
struct gve_priv *priv;
+ int work_done = 0;
block = container_of(napi, struct gve_notify_block, napi);
priv = block->priv;
if (block->tx)
reschedule |= gve_tx_poll(block, budget);
- if (block->rx)
- reschedule |= gve_rx_poll(block, budget);
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
+ reschedule |= work_done == budget;
+ }
if (reschedule)
return budget;
- napi_complete(napi);
- irq_doorbell = gve_irq_doorbell(priv, block);
- iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
+ /* Complete processing - don't unmask irq if busy polling is enabled */
+ if (likely(napi_complete_done(napi, work_done))) {
+ irq_doorbell = gve_irq_doorbell(priv, block);
+ iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
- /* Double check we have no extra work.
- * Ensure unmask synchronizes with checking for work.
- */
- mb();
- if (block->tx)
- reschedule |= gve_tx_poll(block, -1);
- if (block->rx)
- reschedule |= gve_rx_poll(block, -1);
- if (reschedule && napi_reschedule(napi))
- iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+	/* Ensure the IRQ ACK is visible before we check for pending work.
+	 * If the queue has issued any updates, they will be visible by then.
+	 */
+ mb();
- return 0;
+ if (block->tx)
+ reschedule |= gve_tx_clean_pending(priv, block->tx);
+ if (block->rx)
+ reschedule |= gve_rx_work_pending(block->rx);
+
+ if (reschedule && napi_reschedule(napi))
+ iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+ }
+ return work_done;
}
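This brings gve_napi_poll() in line with the canonical NAPI contract: report exactly the work done, return the full budget to stay scheduled, and only unmask the interrupt when napi_complete_done() confirms the instance really left polling (it returns false while busy polling owns it). The bare pattern, with clean_rx()/unmask_irq() as hypothetical stand-ins:

	work_done = clean_rx(budget);
	if (work_done == budget)
		return budget;		/* stay scheduled; IRQ stays masked */
	if (napi_complete_done(napi, work_done))
		unmask_irq();		/* skipped while busy polling */
	return work_done;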
static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
@@ -279,7 +288,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int i, j;
int err;
- priv->msix_vectors = kvzalloc(num_vecs_requested *
+ priv->msix_vectors = kvcalloc(num_vecs_requested,
sizeof(*priv->msix_vectors), GFP_KERNEL);
if (!priv->msix_vectors)
return -ENOMEM;
@@ -640,7 +649,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
int err;
/* Setup tx rings */
- priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
+ priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
@@ -653,7 +662,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
goto free_tx;
/* Setup rx rings */
- priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
+ priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
GFP_KERNEL);
if (!priv->rx) {
err = -ENOMEM;
@@ -776,12 +785,11 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
qpl->id = id;
qpl->num_entries = 0;
- qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
+ qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
return -ENOMEM;
- qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
- GFP_KERNEL);
+ qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->page_buses)
return -ENOMEM;
@@ -840,7 +848,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
if (priv->queue_format == GVE_GQI_RDA_FORMAT)
return 0;
- priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
+ priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
if (!priv->qpls)
return -ENOMEM;
@@ -859,7 +867,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
+ priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
sizeof(unsigned long), GFP_KERNEL);
if (!priv->qpl_cfg.qpl_id_map) {
err = -ENOMEM;
@@ -1116,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv)
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- struct gve_priv *priv = netdev_priv(dev);
+ struct gve_notify_block *block;
+ struct gve_tx_ring *tx = NULL;
+ struct gve_priv *priv;
+ u32 last_nic_done;
+ u32 current_time;
+ u32 ntfy_idx;
+ netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+ priv = netdev_priv(dev);
+	if (txqueue >= priv->tx_cfg.num_queues)
+ goto reset;
+
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
+	if (ntfy_idx >= priv->num_ntfy_blks)
+ goto reset;
+
+ block = &priv->ntfy_blocks[ntfy_idx];
+ tx = block->tx;
+
+ current_time = jiffies_to_msecs(jiffies);
+ if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ goto reset;
+
+ /* Check to see if there are missed completions, which will allow us to
+ * kick the queue.
+ */
+ last_nic_done = gve_tx_load_event_counter(priv, tx);
+ if (last_nic_done - tx->done) {
+ netdev_info(dev, "Kicking queue %d", txqueue);
+ iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+ napi_schedule(&block->napi);
+ tx->last_kick_msec = current_time;
+ goto out;
+ } // Else reset.
+
+reset:
gve_schedule_reset(priv);
+
+out:
+ if (tx)
+ tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
@@ -1247,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv)
.value = cpu_to_be64(last_completion),
.queue_id = cpu_to_be32(idx),
};
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
+ .value = cpu_to_be64(priv->tx[idx].queue_timeout),
+ .queue_id = cpu_to_be32(idx),
+ };
}
}
/* rx stats */
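The kvzalloc(n * size) call sites in this file all move to kvcalloc(n, size); the point is the overflow check on the multiplication, so an oversized count fails the allocation instead of silently producing a truncated zeroed buffer. Roughly:

	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;		/* product would have wrapped */
	return kvzalloc(bytes, GFP_KERNEL);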
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 94941d4e4744..95bc4d8a1811 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev,
dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
GVE_DATA_SLOT_ADDR_PAGE_MASK);
+ page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
- if (rx->data.raw_addressing) {
- u32 slots = rx->mask + 1;
- int i;
+ u32 slots = rx->mask + 1;
+ int i;
+ if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
} else {
+ for (i = 0; i < slots; i++)
+ page_ref_sub(rx->data.page_info[i].page,
+ rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
}
@@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
page_info->page_offset = 0;
page_info->page_address = page_address(page);
*slot_addr = cpu_to_be64(addr);
+ /* The page already has 1 ref */
+ page_ref_add(page, INT_MAX - 1);
+ page_info->pagecnt_bias = INT_MAX;
}
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
@@ -295,21 +302,22 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
static bool gve_rx_can_flip_buffers(struct net_device *netdev)
{
- return PAGE_SIZE == 4096
+ return PAGE_SIZE >= 4096
? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
}
-static int gve_rx_can_recycle_buffer(struct page *page)
+static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
- int pagecount = page_count(page);
+ int pagecount = page_count(page_info->page);
/* This page is not being used by any SKBs - reuse */
- if (pagecount == 1)
+ if (pagecount == page_info->pagecnt_bias)
return 1;
/* This page is still being used by an SKB - we can't reuse */
- else if (pagecount >= 2)
+ else if (pagecount > page_info->pagecnt_bias)
return 0;
- WARN(pagecount < 1, "Pagecount should never be < 1");
+ WARN(pagecount < page_info->pagecnt_bias,
+ "Pagecount should never be less than the bias.");
return -1;
}
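The bias scheme replaces a get_page()/put_page() atomic pair per received packet with one large refcount seed and a cheap driver-local counter; a page is recyclable exactly when the stack has dropped every reference it was handed, i.e. page_count() has fallen back to the bias. In outline (gve_dec_pagecnt_bias() is the driver's decrement helper):

	/* once, at buffer setup (the fresh page holds one reference): */
	page_ref_add(page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;

	/* per packet handed to an skb (was: get_page()): */
	page_info->pagecnt_bias--;

	/* recycle test (was: page_count(page) == 1): */
	reuse = page_count(page) == page_info->pagecnt_bias;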
@@ -325,11 +333,11 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
if (!skb)
return NULL;
- /* Optimistically stop the kernel from freeing the page by increasing
- * the page bias. We will check the refcount in refill to determine if
- * we need to alloc a new page.
+ /* Optimistically stop the kernel from freeing the page.
+ * We will check again in refill to determine if we need to alloc a
+ * new page.
*/
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
return skb;
}
@@ -352,7 +360,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
@@ -376,8 +384,18 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
union gve_rx_data_slot *data_slot;
struct sk_buff *skb = NULL;
dma_addr_t page_bus;
+ void *va;
u16 len;
+	/* Prefetch two packet pages ahead; we will need them soon. */
+ page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+ va = page_info->page_address + GVE_RX_PAD +
+ page_info->page_offset;
+
+ prefetch(page_info->page); /* Kernel page struct. */
+ prefetch(va); /* Packet header. */
+ prefetch(va + 64); /* Next cacheline too. */
+
/* drop this packet */
if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
u64_stats_update_begin(&rx->statss);
@@ -408,7 +426,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
int recycle = 0;
if (can_flip) {
- recycle = gve_rx_can_recycle_buffer(page_info->page);
+ recycle = gve_rx_can_recycle_buffer(page_info);
if (recycle < 0) {
if (!rx->data.raw_addressing)
gve_schedule_reset(priv);
@@ -456,7 +474,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
return true;
}
-static bool gve_rx_work_pending(struct gve_rx_ring *rx)
+bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
struct gve_rx_desc *desc;
__be16 flags_seq;
@@ -499,7 +517,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
* owns half the page, it is impossible to tell which half. Either
* the whole page is free or it needs to be replaced.
*/
- int recycle = gve_rx_can_recycle_buffer(page_info->page);
+ int recycle = gve_rx_can_recycle_buffer(page_info);
if (recycle < 0) {
if (!rx->data.raw_addressing)
@@ -514,8 +532,13 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
- if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+ if (gve_rx_alloc_buffer(priv, dev, page_info,
+ data_slot)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
break;
+ }
}
}
fill_cnt++;
@@ -524,8 +547,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
return true;
}
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat)
+static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+ netdev_features_t feat)
{
struct gve_priv *priv = rx->gve;
u32 work_done = 0, packets = 0;
@@ -546,6 +569,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
"[%d] seqno=%d rx->desc.seqno=%d\n",
rx->q_num, GVE_SEQNO(desc->flags_seq),
rx->desc.seqno);
+
+ /* prefetch two descriptors ahead */
+ prefetch(rx->desc.desc_ring + ((cnt + 2) & rx->mask));
+
dropped = !gve_rx(rx, desc, feat, idx);
if (!dropped) {
bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
@@ -559,13 +586,15 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
}
if (!work_done && rx->fill_cnt - cnt > rx->db_threshold)
- return false;
+ return 0;
- u64_stats_update_begin(&rx->statss);
- rx->rpackets += packets;
- rx->rbytes += bytes;
- u64_stats_update_end(&rx->statss);
- rx->cnt = cnt;
+ if (work_done) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rpackets += packets;
+ rx->rbytes += bytes;
+ u64_stats_update_end(&rx->statss);
+ rx->cnt = cnt;
+ }
/* restock ring slots */
if (!rx->data.raw_addressing) {
@@ -576,26 +605,26 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
* falls below a threshold.
*/
if (!gve_rx_refill_buffers(priv, rx))
- return false;
+ return 0;
/* If we were not able to completely refill buffers, we'll want
* to schedule this queue for work again to refill buffers.
*/
if (rx->fill_cnt - cnt <= rx->db_threshold) {
gve_rx_write_doorbell(priv, rx);
- return true;
+ return budget;
}
}
gve_rx_write_doorbell(priv, rx);
- return gve_rx_work_pending(rx);
+ return work_done;
}
-bool gve_rx_poll(struct gve_notify_block *block, int budget)
+int gve_rx_poll(struct gve_notify_block *block, int budget)
{
struct gve_rx_ring *rx = block->rx;
netdev_features_t feat;
- bool repoll = false;
+ int work_done = 0;
feat = block->napi.dev->features;
@@ -604,8 +633,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget)
budget = INT_MAX;
if (budget > 0)
- repoll |= gve_clean_rx_done(rx, budget, feat);
- else
- repoll |= gve_rx_work_pending(rx);
- return repoll;
+ work_done = gve_clean_rx_done(rx, budget, feat);
+
+ return work_done;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 665ac795a1ad..a9cb241fedf4 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- gve_clean_tx_done(priv, tx, tx->req, false);
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
netdev_tx_reset_queue(tx->netdev_txq);
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
/* Make sure everything is zeroed to start */
memset(tx, 0, sizeof(*tx));
+ spin_lock_init(&tx->clean_lock);
tx->q_num = idx;
tx->mask = slots - 1;
@@ -303,15 +304,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
- dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_single(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
} else {
- dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_page(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
}
}
@@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}
+static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
+
/* Stops the queue if the skb cannot be transmitted. */
-static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
+static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct sk_buff *skb)
{
int bytes_required = 0;
+ u32 nic_done;
+ u32 to_do;
+ int ret;
if (!tx->raw_addressing)
bytes_required = gve_skb_fifo_bytes_required(tx, skb);
@@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
if (likely(gve_can_tx(tx, bytes_required)))
return 0;
- /* No space, so stop the queue */
- tx->stop_queue++;
- netif_tx_stop_queue(tx->netdev_txq);
- smp_mb(); /* sync with restarting queue in gve_clean_tx_done() */
-
- /* Now check for resources again, in case gve_clean_tx_done() freed
- * resources after we checked and we stopped the queue after
- * gve_clean_tx_done() checked.
- *
- * gve_maybe_stop_tx() gve_clean_tx_done()
- * nsegs/can_alloc test failed
- * gve_tx_free_fifo()
- * if (tx queue stopped)
- * netif_tx_queue_wake()
- * netif_tx_stop_queue()
- * Need to check again for space here!
- */
- if (likely(!gve_can_tx(tx, bytes_required)))
- return -EBUSY;
+ ret = -EBUSY;
+ spin_lock(&tx->clean_lock);
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = nic_done - tx->done;
- netif_tx_start_queue(tx->netdev_txq);
- tx->wake_queue++;
- return 0;
+ /* Only try to clean if there is hope for TX */
+ if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
+ if (to_do > 0) {
+ to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
+ gve_clean_tx_done(priv, tx, to_do, false);
+ }
+ if (likely(gve_can_tx(tx, bytes_required)))
+ ret = 0;
+ }
+ if (ret) {
+ /* No space, so stop the queue */
+ tx->stop_queue++;
+ netif_tx_stop_queue(tx->netdev_txq);
+ }
+ spin_unlock(&tx->clean_lock);
+
+ return ret;
}
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
@@ -491,7 +497,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct gve_tx_buffer_state *info;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
- struct gve_tx_dma_buf *buf;
u64 addr;
u32 len;
int i;
@@ -515,9 +520,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto drop;
}
- buf = &info->buf;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(info, len, len);
+ dma_unmap_addr_set(info, dma, addr);
payload_nfrags = shinfo->nr_frags;
if (hlen < len) {
@@ -549,10 +553,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto unmap_drop;
}
- buf = &tx->info[idx].buf;
tx->info[idx].skb = NULL;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], dma, addr);
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
}
@@ -579,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
- if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+ if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
/* We need to ring the txq doorbell -- we have stopped the Tx
* queue for want of resources, but prior calls to gve_tx()
* may have added descriptors without ringing the doorbell.
@@ -675,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
return pkts;
}
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx)
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx)
{
- u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));
+ u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
+ __be32 counter = READ_ONCE(priv->counter_array[counter_index]);
- return READ_ONCE(priv->counter_array[counter_index]);
+ return be32_to_cpu(counter);
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
- bool repoll = false;
u32 nic_done;
u32 to_do;
@@ -695,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget)
if (budget == 0)
budget = INT_MAX;
+	/* The TX path may try to clean completed packets in order to make
+	 * room to xmit more; to avoid a cleaning conflict, take the
+	 * spinlock, which yields better xmit/clean concurrency than
+	 * netif's queue lock.
+	 */
+ spin_lock(&tx->clean_lock);
/* Find out how much work there is to be done */
- tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
- nic_done = be32_to_cpu(tx->last_nic_done);
- if (budget > 0) {
- /* Do as much work as we have that the budget will
- * allow
- */
- to_do = min_t(u32, (nic_done - tx->done), budget);
- gve_clean_tx_done(priv, tx, to_do, true);
- }
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_tx_done(priv, tx, to_do, true);
+ spin_unlock(&tx->clean_lock);
/* If we still have work we want to repoll */
- repoll |= (nic_done != tx->done);
- return repoll;
+ return nic_done != tx->done;
+}
+
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+ u32 nic_done = gve_tx_load_event_counter(priv, tx);
+
+ return nic_done != tx->done;
}
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 05ddb6a75c38..ec394d991668 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
int j;
for (j = 0; j < cur_state->num_bufs; j++) {
- struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
-
if (j == 0) {
dma_unmap_single(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
} else {
dma_unmap_page(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
}
}
if (cur_state->skb) {
@@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
const bool is_gso = skb_is_gso(skb);
u32 desc_idx = tx->dqo_tx.tail;
- struct gve_tx_pending_packet_dqo *pending_packet;
+ struct gve_tx_pending_packet_dqo *pkt;
struct gve_tx_metadata_dqo metadata;
s16 completion_tag;
int i;
- pending_packet = gve_alloc_pending_packet(tx);
- pending_packet->skb = skb;
- pending_packet->num_bufs = 0;
- completion_tag = pending_packet - tx->dqo.pending_packets;
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->skb = skb;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
if (is_gso) {
@@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
/* Map the linear portion of skb */
{
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
u32 len = skb_headlen(skb);
dma_addr_t addr;
@@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag,
@@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
}
for (i = 0; i < shinfo->nr_frags; i++) {
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
const skb_frag_t *frag = &shinfo->frags[i];
bool is_eop = i == (shinfo->nr_frags - 1);
u32 len = skb_frag_size(frag);
@@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag, is_eop, is_gso);
@@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
return 0;
err:
- for (i = 0; i < pending_packet->num_bufs; i++) {
- struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
-
+ for (i = 0; i < pkt->num_bufs; i++) {
if (i == 0) {
- dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
+ dma_unmap_single(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
DMA_TO_DEVICE);
} else {
- dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_page(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE);
}
}
- pending_packet->skb = NULL;
- pending_packet->num_bufs = 0;
- gve_free_pending_packet(tx, pending_packet);
+ pkt->skb = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
return -1;
}
@@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
static void remove_from_list(struct gve_tx_ring *tx,
struct gve_index_list *list,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
s16 prev_index, next_index;
- prev_index = pending_packet->prev;
- next_index = pending_packet->next;
+ prev_index = pkt->prev;
+ next_index = pkt->next;
if (prev_index == -1) {
/* Node is head */
@@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx,
}
static void gve_unmap_packet(struct device *dev,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
- struct gve_tx_dma_buf *buf;
int i;
/* SKB linear portion is guaranteed to be mapped */
- buf = &pending_packet->bufs[0];
- dma_unmap_single(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
- for (i = 1; i < pending_packet->num_bufs; i++) {
- buf = &pending_packet->bufs[i];
- dma_unmap_page(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+ dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+ for (i = 1; i < pkt->num_bufs; i++) {
+ dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
}
- pending_packet->num_bufs = 0;
+ pkt->num_bufs = 0;
}
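
The gve hunks above fold the per-buffer struct gve_tx_dma_buf into parallel dma[]/len[] arrays on the pending-packet struct, still accessed through the dma_unmap_addr()/dma_unmap_len() macro family. A minimal sketch of the pattern, with a hypothetical container name and array bound (the DEFINE_* macros expand to real members only when the architecture needs DMA unmap state, so all access must stay behind the accessors):

	/* Hypothetical container; MAX_BUFS is an assumed bound. */
	struct pkt_dma_state {
		DEFINE_DMA_UNMAP_ADDR(dma[MAX_BUFS]);	/* dma_addr_t dma[MAX_BUFS] */
		DEFINE_DMA_UNMAP_LEN(len[MAX_BUFS]);	/* u32 len[MAX_BUFS] */
		u16 num_bufs;
	};

	/* Record one mapping... */
	dma_unmap_addr_set(st, dma[i], addr);
	dma_unmap_len_set(st, len[i], mapped_len);

	/* ...and undo it later. */
	dma_unmap_page(dev, dma_unmap_addr(st, dma[i]),
		       dma_unmap_len(st, len[i]), DMA_TO_DEVICE);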
/* Completion types and expected behavior:
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 93f3dcbeeea9..45ff7a9ab5f9 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -18,12 +18,16 @@ void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
+ unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
+ num_online_cpus());
int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
struct gve_tx_ring *tx = &priv->tx[queue_idx];
block->tx = tx;
tx->ntfy_id = ntfy_idx;
+ netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
+ queue_idx);
}
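
The gve_utils.c hunk spreads TX queues across CPUs via XPS: each queue's notify-block index is mapped onto one online CPU. A sketch of the call:

	/* Pin queue_idx's transmit path to one CPU; get_cpu_mask(n)
	 * returns a cpumask containing only CPU n. */
	netif_set_xps_queue(netdev, get_cpu_mask(cpu), queue_idx);

The same mapping can be inspected or overridden from user space through /sys/class/net/<dev>/queues/tx-<n>/xps_cpus.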
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 37b605fed32c..c84ef494bd60 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -998,7 +998,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 22bf914f2dbd..a6c18b6527f9 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -427,7 +427,7 @@ static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
}
static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
- unsigned char *mac)
+ const unsigned char *mac)
{
u32 reg;
@@ -555,7 +555,7 @@ static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(skaddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, skaddr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
@@ -841,7 +841,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
(unsigned long)phy->phy_id,
phy_modes(phy->interface));
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
dev_warn(dev, "using random MAC address %pM\n",
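
These hisilicon hunks (and the hip04 one above) are part of the tree-wide move to treat netdev->dev_addr as read-only: writes go through helpers such as eth_hw_addr_set(), eth_hw_addr_random() and of_get_ethdev_address() instead of memcpy() or direct stores. A minimal .ndo_set_mac_address sketch using the new helper (function name hypothetical):

	static int example_set_mac(struct net_device *dev, void *p)
	{
		struct sockaddr *sa = p;

		if (!is_valid_ether_addr(sa->sa_data))
			return -EADDRNOTAVAIL;
		eth_hw_addr_set(dev, sa->sa_data);	/* copies ETH_ALEN bytes */
		return 0;
	}

eth_hw_addr_random() additionally marks the address as NET_ADDR_RANDOM, which is why it replaces the bare eth_random_addr() call in the hip04 hunk.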
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c1aae0fca5e9..d7e62eca050f 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -429,7 +429,7 @@ static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
+ const unsigned char *mac = dev->dev_addr;
u32 val;
val = mac[1] | (mac[0] << 8);
@@ -1219,7 +1219,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto out_phy_node;
}
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
netdev_warn(ndev, "using random MAC address %pM\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 2b7db1c22321..d72657444ef3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -499,7 +499,7 @@ struct hnae_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
- int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*set_mac_addr)(struct hnae_handle *handle, const void *p);
int (*add_uc_addr)(struct hnae_handle *handle,
const unsigned char *addr);
int (*rm_uc_addr)(struct hnae_handle *handle,
@@ -558,7 +558,7 @@ struct hnae_handle {
enum hnae_media_type media_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
- struct hnae_queue **qs; /* array base of all queues */
+ struct hnae_queue *qs[]; /* flexible array of all queues */
};
#define ring_to_dev(ring) ((ring)->q->dev->dev)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 75e4ec569da8..bc3e406f0139 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -81,8 +81,8 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
- vf_cb = kzalloc(sizeof(*vf_cb) +
- qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+ vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf),
+ GFP_KERNEL);
if (unlikely(!vf_cb)) {
dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
ae_handle = ERR_PTR(-ENOMEM);
@@ -108,7 +108,6 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
goto vf_id_err;
}
- ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
for (i = 0; i < qnum_per_vf; i++) {
ae_handle->qs[i] = &ring_pair_cb->q;
ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
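
Converting qs to a C99 flexible-array member lets the allocation above use struct_size() instead of open-coded pointer arithmetic, and drops the fragile "&ae_handle->qs + 1" fixup. A sketch of the idiom with hypothetical names:

	struct handle_like {
		int n;
		struct queue *qs[];	/* flex array, must be last */
	};

	struct cb_like {
		int port;
		struct handle_like h;	/* must stay the last member */
	};

	/* One allocation covers the struct plus nq trailing pointers,
	 * with overflow checking built into struct_size(). */
	cb = kzalloc(struct_size(cb, h.qs, nq), GFP_KERNEL);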
@@ -207,7 +206,7 @@ static void hns_ae_fini_queue(struct hnae_queue *q)
hns_rcb_reset_ring_hw(q);
}
-static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+static int hns_ae_set_mac_address(struct hnae_handle *handle, const void *p)
{
int ret;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index f387a859a201..8f391e2adcc0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -450,7 +450,7 @@ static void hns_gmac_update_stats(void *mac_drv)
+= dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
}
-static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_gmac_set_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f41379de2186..7edf8569514c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -240,7 +240,7 @@ int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
*@addr:mac address
*/
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
- u32 vmid, char *addr)
+ u32 vmid, const char *addr)
{
int ret;
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 8943ffab4418..e3bb05959ba9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -348,7 +348,7 @@ struct mac_driver {
/*disable mac when disable nic or dsaf*/
void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
/* config mac address*/
- void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+ void (*set_mac_addr)(void *mac_drv, const char *mac_addr);
/*adjust mac mode of port,include speed and duplex*/
int (*adjust_link)(void *mac_drv, enum mac_speed speed,
u32 full_duplex);
@@ -425,7 +425,8 @@ int hns_mac_init(struct dsaf_device *dsaf_dev);
void mac_adjust_link(struct net_device *net_dev);
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
-int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid,
+ const char *addr);
int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
u32 port_num, char *addr, bool enable);
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index cba04bfa0b3f..5526a10caac5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -210,7 +210,7 @@ struct hnae_vf_cb {
u8 port_index;
struct hns_mac_cb *mac_cb;
struct dsaf_device *dsaf_dev;
- struct hnae_handle ae_handle; /* must be the last number */
+ struct hnae_handle ae_handle; /* must be the last member */
};
struct dsaf_int_xge_src {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 401fef5f1d07..fc26ffaae620 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -255,7 +255,7 @@ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
}
-static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 343c605c4be8..22a463e15678 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1194,7 +1194,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return ret;
}
- memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, mac_addr->sa_data);
return 0;
}
@@ -1212,7 +1212,7 @@ static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
+ if (device_get_ethdev_address(priv->dev, ndev)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index d701451596c8..30a3954b78e0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -151,6 +152,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -294,6 +298,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_MAC_TNL_STATUS,
HNAE3_DBG_CMD_SERV_INFO,
HNAE3_DBG_CMD_UMV_INFO,
+ HNAE3_DBG_CMD_PAGE_POOL_INFO,
HNAE3_DBG_CMD_UNKNOWN,
};
@@ -341,6 +346,8 @@ struct hnae3_dev_specs {
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
u16 max_frm_size;
u16 max_qset_num;
+ u16 umv_size;
+ u16 mc_mac_size;
};
struct hnae3_client_ops {
@@ -588,7 +595,7 @@ struct hnae3_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
- int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
+ int (*set_mac_addr)(struct hnae3_handle *handle, const void *p,
bool is_first);
int (*do_ioctl)(struct hnae3_handle *handle,
struct ifreq *ifr, int cmd);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 2b66c59f5eaf..b26d43c9c088 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -336,6 +336,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "page_pool_info",
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
@@ -924,6 +931,10 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
dev_specs->max_tm_rate);
*pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
dev_specs->max_qset_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
+ dev_specs->umv_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
+ dev_specs->mc_mac_size);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@ -937,6 +948,69 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
return 0;
}
+static const struct hns3_dbg_item page_pool_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "ALLOCATE_CNT", 2 },
+ { "FREE_CNT", 6 },
+ { "POOL_SIZE(PAGE_NUM)", 2 },
+ { "ORDER", 2 },
+ { "NUMA_ID", 2 },
+ { "MAX_LEN", 2 },
+};
+
+static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
+ char **result, u32 index)
+{
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ atomic_read(&ring->page_pool->pages_state_release_cnt));
+ sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
+ sprintf(result[j++], "%u", ring->page_pool->p.order);
+ sprintf(result[j++], "%d", ring->page_pool->p.nid);
+ sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+}
+
+static int
+hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
+ NULL, ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_page_pool_info(ring, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ page_pool_info_items,
+ (const char **)result,
+ ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
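
The dump pairs page_pool's allocation and free counters; their difference is the number of pages the pool currently holds out (in flight), which is the main health signal here. A hedged helper sketch reading the same fields as the dump (unsigned wrap-around makes the subtraction safe):

	static u32 pp_pages_inflight(const struct page_pool *pool)
	{
		/* hold_cnt counts pages handed out, release_cnt pages returned */
		return pool->pages_state_hold_cnt -
		       atomic_read(&pool->pages_state_release_cnt);
	}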
static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
{
u32 i;
@@ -978,6 +1052,10 @@ static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dbg_dump = hns3_dbg_tx_queue_info,
},
+ {
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dbg_dump = hns3_dbg_page_pool_info,
+ },
};
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4b886a13e079..a2b993d62822 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2284,7 +2284,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return ret;
}
- ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
+ eth_hw_addr_set(netdev, mac_addr->sa_data);
return 0;
}
@@ -4940,7 +4940,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
- ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
} else {
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 33244472e0d0..bfcfefa9d2b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1188,7 +1188,10 @@ struct hclge_dev_specs_1_cmd {
__le16 max_frm_size;
__le16 max_qset_num;
__le16 max_int_gl;
- u8 rsv1[18];
+ u8 rsv0[2];
+ __le16 umv_size;
+ __le16 mc_mac_size;
+ u8 rsv1[12];
};
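
For reference, the re-carved reserved space keeps the firmware command layout unchanged:

	/* old: rsv1[18]
	 * new: rsv0[2] + umv_size(2) + mc_mac_size(2) + rsv1[12] = 18 bytes */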
/* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 32f62cd2dd99..f0aa4fbd2200 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1992,6 +1992,9 @@ static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
}
mutex_unlock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
+ hdev->used_mc_mac_num);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index e4aad695abcc..4c441e6a5082 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -109,7 +109,6 @@ int hclge_devlink_init(struct hclge_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
struct hclge_devlink_priv *priv;
struct devlink *devlink;
- int ret;
devlink = devlink_alloc(&hclge_devlink_ops,
sizeof(struct hclge_devlink_priv), &pdev->dev);
@@ -120,28 +119,15 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- ret = devlink_register(devlink);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
- ret);
- goto out_reg_fail;
- }
-
- devlink_reload_enable(devlink);
-
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
-
-out_reg_fail:
- devlink_free(devlink);
- return ret;
}
void hclge_devlink_uninit(struct hclge_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
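
Both devlink hunks in this series follow the updated core API: devlink_register() can no longer fail (it returns void), and the reload opt-in moved from devlink_reload_enable() to devlink_set_features(). A sketch of the resulting init order, which publishes the instance only once it is fully set up:

	devlink = devlink_alloc(&ops, sizeof(*priv), &pdev->dev);
	if (!devlink)
		return -ENOMEM;
	priv = devlink_priv(devlink);
	priv->hdev = hdev;

	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);	/* void: publish last, cannot fail */
	return 0;

Teardown stays symmetric: devlink_unregister() followed by devlink_free().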
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index dcd40cc73082..be6f0a6229aa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1342,8 +1342,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
- if (!cfg->umv_space)
- cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
HCLGE_CFG_PF_RSS_SIZE_M,
@@ -1419,6 +1417,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1440,6 +1439,8 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1460,6 +1461,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+ if (!dev_specs->umv_size)
+ dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@ -1549,7 +1552,10 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
- hdev->wanted_umv_size = cfg.umv_space;
+ if (cfg.umv_space)
+ hdev->wanted_umv_size = cfg.umv_space;
+ else
+ hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
hdev->gro_en = true;
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
@@ -8498,6 +8504,9 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ if (hdev->ae_dev->dev_specs.mc_mac_size)
+ set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
+
return 0;
}
@@ -8515,6 +8524,8 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
mutex_unlock(&hdev->vport_lock);
+
+ hdev->used_mc_mac_num = 0;
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
@@ -8769,6 +8780,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
+ bool is_new_addr = false;
int status;
/* mac addr check */
@@ -8782,6 +8794,13 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
+ if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+ hdev->used_mc_mac_num >=
+ hdev->ae_dev->dev_specs.mc_mac_size)
+ goto err_no_space;
+
+ is_new_addr = true;
+
/* This mac addr does not exist, add a new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
@@ -8791,12 +8810,18 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- /* if already overflow, not to print each time */
- if (status == -ENOSPC &&
- !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
- dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ if (status == -ENOSPC)
+ goto err_no_space;
+ else if (!status && is_new_addr)
+ hdev->used_mc_mac_num++;
return status;
+
+err_no_space:
+ /* if the table already overflowed, do not print on each failure */
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
@@ -8833,12 +8858,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
- if (hclge_is_all_function_id_zero(desc))
+ if (hclge_is_all_function_id_zero(desc)) {
/* All the vfid is zero, so need to delete this entry */
status = hclge_remove_mac_vlan_tbl(vport, &req);
- else
+ if (!status)
+ hdev->used_mc_mac_num--;
+ } else {
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+ }
} else if (status == -ENOENT) {
status = 0;
}
@@ -9414,7 +9442,7 @@ int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index de6afbcbfbac..ca25e2edf3f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -938,6 +938,8 @@ struct hclge_dev {
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
+ /* multicast mac address number used by PF and its VFs */
+ u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index f478770299c6..fdc19868b818 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -110,7 +110,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
struct hclgevf_devlink_priv *priv;
struct devlink *devlink;
- int ret;
devlink =
devlink_alloc(&hclgevf_devlink_ops,
@@ -122,28 +121,15 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- ret = devlink_register(devlink);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
- ret);
- goto out_reg_fail;
- }
-
- devlink_reload_enable(devlink);
-
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
-
-out_reg_fail:
- devlink_free(devlink);
- return ret;
}
void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index bef6b98e2f50..3306050ad72c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1349,7 +1349,7 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
-static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 6e11ee339f12..60ae8bfc5f69 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-int hinic_devlink_register(struct hinic_devlink_priv *priv)
+void hinic_devlink_register(struct hinic_devlink_priv *priv)
{
struct devlink *devlink = priv_to_devlink(priv);
- return devlink_register(devlink);
+ devlink_register(devlink);
}
void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
index 9e315011015c..46760d607b9b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
@@ -110,7 +110,7 @@ struct host_image_st {
struct devlink *hinic_devlink_alloc(struct device *dev);
void hinic_devlink_free(struct devlink *devlink);
-int hinic_devlink_register(struct hinic_devlink_priv *priv);
+void hinic_devlink_register(struct hinic_devlink_priv *priv);
void hinic_devlink_unregister(struct hinic_devlink_priv *priv);
int hinic_health_reporters_create(struct hinic_devlink_priv *priv);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 56b6b04e209b..657a15447bd0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -754,17 +754,9 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- err = hinic_devlink_register(hwdev->devlink_dev);
- if (err) {
- dev_err(&hwif->pdev->dev, "Failed to register devlink\n");
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
- return err;
- }
-
err = hinic_func_to_func_init(hwdev);
if (err) {
dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
- hinic_devlink_unregister(hwdev->devlink_dev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
return err;
}
@@ -787,7 +779,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
}
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
-
+ hinic_devlink_register(hwdev->devlink_dev);
return 0;
}
@@ -799,6 +791,7 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
{
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ hinic_devlink_unregister(hwdev->devlink_dev);
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
if (!HINIC_IS_VF(hwdev->hwif)) {
@@ -816,8 +809,6 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_func_to_func_free(hwdev);
- hinic_devlink_unregister(hwdev->devlink_dev);
-
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index ae707e305684..f9a766b8ac43 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -656,7 +656,7 @@ static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
err = change_mac_addr(netdev, new_mac);
if (!err)
- memcpy(netdev->dev_addr, new_mac, ETH_ALEN);
+ eth_hw_addr_set(netdev, new_mac);
return err;
}
@@ -1181,6 +1181,7 @@ static int nic_dev_init(struct pci_dev *pdev)
struct net_device *netdev;
struct hinic_hwdev *hwdev;
struct devlink *devlink;
+ u8 addr[ETH_ALEN];
int err, num_qps;
devlink = hinic_devlink_alloc(&pdev->dev);
@@ -1259,11 +1260,12 @@ static int nic_dev_init(struct pci_dev *pdev)
pci_set_drvdata(pdev, netdev);
- err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
+ err = hinic_port_get_mac(nic_dev, addr);
if (err) {
dev_err(&pdev->dev, "Failed to get mac address\n");
goto err_get_mac;
}
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
@@ -1379,10 +1381,8 @@ static int hinic_probe(struct pci_dev *pdev,
{
int err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n");
err = pci_request_regions(pdev, HINIC_DRV_NAME);
if (err) {
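
dev_err_probe() folds the error print and the return into one statement, and stays quiet (logging only to the deferred-probe log) when the code is -EPROBE_DEFER. A generic sketch with a hypothetical clock resource:

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get clock\n");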
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 0696f723228a..3909c6a0af89 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -339,14 +339,13 @@ static const struct net_device_ops sun3_82586_netdev_ops = {
static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
{
- int i, size, retval;
+ int size, retval;
if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
return -EBUSY;
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
@@ -461,7 +460,7 @@ static int init586(struct net_device *dev)
ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
ias_cmd->cmd_link = 0xffff;
- memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+ memcpy((char *)&ias_cmd->iaddr,(const char *) dev->dev_addr,ETH_ALEN);
p->scb->cbl_offset = make16(ias_cmd);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index d5df131b183c..bad94e4d50f4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1741,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
goto out_free;
}
- memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr->sa_data);
/* Deregister old MAC in pHYP */
if (port->state == EHEA_PORT_UP) {
@@ -2986,7 +2986,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
SET_NETDEV_DEV(dev, port_dev);
/* initialize net_device structure */
- memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)&port->mac_addr);
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 664a91af662d..6b3fc8823c54 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1013,7 +1013,7 @@ static int emac_set_mac_address(struct net_device *ndev, void *sa)
mutex_lock(&dev->link_lock);
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
emac_rx_disable(dev);
emac_tx_disable(dev);
@@ -2848,7 +2848,6 @@ static int emac_init_phy(struct emac_instance *dev)
static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
- const void *p;
int err;
/* Read config from device-tree */
@@ -2976,13 +2975,12 @@ static int emac_init_config(struct emac_instance *dev)
}
/* Read MAC-address */
- p = of_get_property(np, "local-mac-address", NULL);
- if (p == NULL) {
- printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
- np);
- return -ENXIO;
+ err = of_get_ethdev_address(np, dev->ndev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
+ return err;
}
- memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3d9b4f99d357..45ba40cf4d07 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -483,17 +483,6 @@ retry:
return rc;
}
-static u64 ibmveth_encode_mac_addr(u8 *mac)
-{
- int i;
- u64 encoded = 0;
-
- for (i = 0; i < ETH_ALEN; i++)
- encoded = (encoded << 8) | mac[i];
-
- return encoded;
-}
-
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
@@ -553,7 +542,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
+ mac_address = ether_addr_to_u64(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
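
ether_addr_to_u64() from <linux/etherdevice.h> does exactly what the removed ibmveth_encode_mac_addr() loop did: pack the six MAC bytes big-endian into the low 48 bits of a u64. Illustrative values (a locally administered address, chosen arbitrarily):

	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	u64 v = ether_addr_to_u64(mac);	/* v == 0x020000aabbccULL */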
@@ -605,17 +594,13 @@ static int ibmveth_open(struct net_device *netdev)
}
rc = -ENOMEM;
- adapter->bounce_buffer =
- kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
- if (!adapter->bounce_buffer)
- goto out_free_irq;
- adapter->bounce_buffer_dma =
- dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
- netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- netdev_err(netdev, "unable to map bounce buffer\n");
- goto out_free_bounce_buffer;
+ adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
+ netdev->mtu + IBMVETH_BUFF_OH,
+ &adapter->bounce_buffer_dma, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ netdev_err(netdev, "unable to alloc bounce buffer\n");
+ goto out_free_irq;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -627,8 +612,6 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
-out_free_bounce_buffer:
- kfree(adapter->bounce_buffer);
out_free_irq:
free_irq(netdev->irq, netdev);
out_free_buffer_pools:
@@ -702,10 +685,9 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- DMA_BIDIRECTIONAL);
- kfree(adapter->bounce_buffer);
+ dma_free_coherent(&adapter->vdev->dev,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ adapter->bounce_buffer, adapter->bounce_buffer_dma);
netdev_dbg(netdev, "close complete\n");
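
Switching the bounce buffer from kmalloc() plus dma_map_single() to dma_alloc_coherent() gives one allocation that stays mapped for its whole lifetime, so there is no separate mapping-error path and no unmap/kfree pair to keep in sync. The lifecycle pattern, as used above:

	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;		/* single failure point */
	/* ... device and CPU may both access buf ... */
	dma_free_coherent(dev, size, buf, dma_handle);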
@@ -1483,7 +1465,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
u64 mcast_addr;
- mcast_addr = ibmveth_encode_mac_addr(ha->addr);
+ mcast_addr = ether_addr_to_u64(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1613,14 +1595,14 @@ static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ mac_address = ether_addr_to_u64(addr->sa_data);
rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
if (rc) {
netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
return rc;
}
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1727,7 +1709,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
- memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+ eth_hw_addr_set(netdev, mac_addr_p);
if (firmware_has_feature(FW_FEATURE_CMO))
memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6aa6ff89a765..9d61167ba767 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -108,6 +108,8 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
+static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -214,22 +216,77 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
return -ETIMEDOUT;
}
+/**
+ * reuse_ltb() - Check if a long term buffer can be reused
+ * @ltb: The long term buffer to be checked
+ * @size: The size of the long term buffer.
+ *
+ * An LTB can be reused unless its size has changed.
+ *
+ * Return: true if the LTB can be reused, false otherwise.
+ */
+static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
+{
+ return (ltb->buff && ltb->size == size);
+}
+
+/**
+ * alloc_long_term_buff() - Allocate a long term buffer (LTB)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb: container object for the LTB
+ * @size: size of the LTB
+ *
+ * Allocate an LTB of the specified size and notify VIOS.
+ *
+ * If the given @ltb already has the correct size, reuse it. Otherwise, if
+ * it's non-NULL, free it. Then allocate a new one of the correct size.
+ * Notify the VIOS either way since we may now be working with a new VIOS.
+ *
+ * Allocating larger chunks of memory during resets, especially during LPM
+ * or under low-memory conditions, can cause resets to fail/time out and the
+ * LPAR to lose connectivity. So hold onto the LTB even if we fail to
+ * communicate with the VIOS, and reuse it on the next open. Free the LTB
+ * only when the adapter is closed.
+ *
+ * Return: 0 if we were able to allocate the LTB and notify the VIOS and
+ * a negative value otherwise.
+ */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
int rc;
- ltb->size = size;
- ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
- GFP_KERNEL);
+ if (!reuse_ltb(ltb, size)) {
+ dev_dbg(dev,
+ "LTB size changed from 0x%llx to 0x%x, reallocating\n",
+ ltb->size, size);
+ free_long_term_buff(adapter, ltb);
+ }
- if (!ltb->buff) {
- dev_err(dev, "Couldn't alloc long term buffer\n");
- return -ENOMEM;
+ if (ltb->buff) {
+ dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
+ ltb->map_id, ltb->size);
+ } else {
+ ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
+ GFP_KERNEL);
+ if (!ltb->buff) {
+ dev_err(dev, "Couldn't alloc long term buffer\n");
+ return -ENOMEM;
+ }
+ ltb->size = size;
+
+ ltb->map_id = find_first_zero_bit(adapter->map_ids,
+ MAX_MAP_ID);
+ bitmap_set(adapter->map_ids, ltb->map_id, 1);
+
+ dev_dbg(dev,
+ "Allocated new LTB [map %d, size 0x%llx]\n",
+ ltb->map_id, ltb->size);
}
- ltb->map_id = adapter->map_id;
- adapter->map_id++;
+
+ /* Ensure ltb is zeroed - especially when reusing it. */
+ memset(ltb->buff, 0, ltb->size);
mutex_lock(&adapter->fw_lock);
adapter->fw_done_rc = 0;
@@ -243,24 +300,20 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
if (rc) {
- dev_err(dev,
- "Long term map request aborted or timed out,rc = %d\n",
+ dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
rc);
goto out;
}
if (adapter->fw_done_rc) {
- dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+ dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
rc = -1;
goto out;
}
rc = 0;
out:
- if (rc) {
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- ltb->buff = NULL;
- }
+ /* don't free LTB on communication error - see function header */
mutex_unlock(&adapter->fw_lock);
return rc;
}
@@ -281,48 +334,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
+
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+
ltb->buff = NULL;
+ /* mark this map_id free */
+ bitmap_clear(adapter->map_ids, ltb->map_id, 1);
ltb->map_id = 0;
}
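
With LTBs now potentially outliving a reset, map ids switch from a monotonically increasing counter to a bitmap allocator, so ids released by free_long_term_buff() can be handed out again. The recycling pattern, assuming a MAX_MAP_ID-bit map as in the hunks above:

	id = find_first_zero_bit(adapter->map_ids, MAX_MAP_ID);
	bitmap_set(adapter->map_ids, id, 1);	/* claim the id */
	/* ... use id as the LTB's map_id ... */
	bitmap_clear(adapter->map_ids, id, 1);	/* recycle on free */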
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
- struct ibmvnic_long_term_buff *ltb)
-{
- struct device *dev = &adapter->vdev->dev;
- int rc;
-
- memset(ltb->buff, 0, ltb->size);
-
- mutex_lock(&adapter->fw_lock);
- adapter->fw_done_rc = 0;
-
- reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc) {
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
- if (rc) {
- dev_info(dev,
- "Reset failed, long term map request timed out or aborted\n");
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- if (adapter->fw_done_rc) {
- dev_info(dev,
- "Reset failed, attempting to free and reallocate buffer\n");
- free_long_term_buff(adapter, ltb);
- mutex_unlock(&adapter->fw_lock);
- return alloc_long_term_buff(adapter, ltb, ltb->size);
- }
- mutex_unlock(&adapter->fw_lock);
- return 0;
-}
-
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -363,31 +383,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ index = pool->free_map[pool->next_free];
+
+ /* We may be reusing an skb from earlier resets. Allocate
+ * only if necessary. But since the LTB may have changed
+ * during reset (see init_rx_pools()), update the LTB below
+ * even when reusing an skb.
+ */
+ skb = pool->rx_buff[index].skb;
if (!skb) {
- dev_err(dev, "Couldn't replenish rx buff\n");
- adapter->replenish_no_mem++;
- break;
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ dev_err(dev, "Couldn't replenish rx buff\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
}
- index = pool->free_map[pool->next_free];
-
- if (pool->rx_buff[index].skb)
- dev_err(dev, "Inconsistent free_map!\n");
+ pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+ pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
offset = index * pool->buff_size;
dst = pool->long_term_buff.buff + offset;
memset(dst, 0, pool->buff_size);
dma_addr = pool->long_term_buff.addr + offset;
- pool->rx_buff[index].data = dst;
- pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+ /* add the skb to an rx_buff in the pool */
+ pool->rx_buff[index].data = dst;
pool->rx_buff[index].dma = dma_addr;
pool->rx_buff[index].skb = skb;
pool->rx_buff[index].pool_index = pool->index;
pool->rx_buff[index].size = pool->buff_size;
+ /* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
@@ -405,7 +435,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
shift = 8;
#endif
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
- pool->next_free = (pool->next_free + 1) % pool->size;
+
+ /* if send_subcrq_indirect queue is full, flush to VIOS */
if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
i == count - 1) {
lpar_rc =
@@ -523,53 +554,12 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
return 0;
}
-static int reset_rx_pools(struct ibmvnic_adapter *adapter)
-{
- struct ibmvnic_rx_pool *rx_pool;
- u64 buff_size;
- int rx_scrqs;
- int i, j, rc;
-
- if (!adapter->rx_pool)
- return -1;
-
- buff_size = adapter->cur_rx_buf_sz;
- rx_scrqs = adapter->num_active_rx_pools;
- for (i = 0; i < rx_scrqs; i++) {
- rx_pool = &adapter->rx_pool[i];
-
- netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
-
- if (rx_pool->buff_size != buff_size) {
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
- rc = alloc_long_term_buff(adapter,
- &rx_pool->long_term_buff,
- rx_pool->size *
- rx_pool->buff_size);
- } else {
- rc = reset_long_term_buff(adapter,
- &rx_pool->long_term_buff);
- }
-
- if (rc)
- return rc;
-
- for (j = 0; j < rx_pool->size; j++)
- rx_pool->free_map[j] = j;
-
- memset(rx_pool->rx_buff, 0,
- rx_pool->size * sizeof(struct ibmvnic_rx_buff));
-
- atomic_set(&rx_pool->available, 0);
- rx_pool->next_alloc = 0;
- rx_pool->next_free = 0;
- rx_pool->active = 1;
- }
-
- return 0;
-}
-
+/**
+ * release_rx_pools() - Release any rx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
@@ -584,6 +574,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
kfree(rx_pool->free_map);
+
free_long_term_buff(adapter, &rx_pool->long_term_buff);
if (!rx_pool->rx_buff)
@@ -602,21 +593,91 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
adapter->num_active_rx_pools = 0;
+ adapter->prev_rx_pool_size = 0;
}
+/**
+ * reuse_rx_pools() - Check if the existing rx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing rx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and size of each buffer) have not
+ * changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers,
+ * which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the rx pools can be reused, false otherwise.
+ */
+static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ u64 old_num_pools, new_num_pools;
+ u64 old_pool_size, new_pool_size;
+ u64 old_buff_size, new_buff_size;
+
+ if (!adapter->rx_pool)
+ return false;
+
+ old_num_pools = adapter->num_active_rx_pools;
+ new_num_pools = adapter->req_rx_queues;
+
+ old_pool_size = adapter->prev_rx_pool_size;
+ new_pool_size = adapter->req_rx_add_entries_per_subcrq;
+
+ old_buff_size = adapter->prev_rx_buf_sz;
+ new_buff_size = adapter->cur_rx_buf_sz;
+
+ /* Require the buff size to be exactly the same for now */
+ if (old_buff_size != new_buff_size)
+ return false;
+
+ if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+ return true;
+
+ if (old_num_pools < adapter->min_rx_queues ||
+ old_num_pools > adapter->max_rx_queues ||
+ old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
+ old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+ return false;
+
+ return true;
+}
+
+/**
+ * init_rx_pools(): Initialize the set of receiver pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of receiver pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing rx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
static int init_rx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
- int rxadd_subcrqs;
+ u64 num_pools;
+ u64 pool_size; /* # of buffers in one pool */
u64 buff_size;
int i, j;
- rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ pool_size = adapter->req_rx_add_entries_per_subcrq;
+ num_pools = adapter->req_rx_queues;
buff_size = adapter->cur_rx_buf_sz;
- adapter->rx_pool = kcalloc(rxadd_subcrqs,
+ if (reuse_rx_pools(adapter)) {
+ dev_dbg(dev, "Reusing rx pools\n");
+ goto update_ltb;
+ }
+
+ /* Allocate/populate the pools. */
+ release_rx_pools(adapter);
+
+ adapter->rx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_rx_pool),
GFP_KERNEL);
if (!adapter->rx_pool) {
@@ -624,26 +685,27 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
- adapter->num_active_rx_pools = rxadd_subcrqs;
+ /* Set num_active_rx_pools early. If we fail below after partial
+ * allocation, release_rx_pools() will know how many to look for.
+ */
+ adapter->num_active_rx_pools = num_pools;
- for (i = 0; i < rxadd_subcrqs; i++) {
+ for (i = 0; i < num_pools; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
- i, adapter->req_rx_add_entries_per_subcrq,
- buff_size);
+ i, pool_size, buff_size);
- rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+ rx_pool->size = pool_size;
rx_pool->index = i;
rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
- rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
GFP_KERNEL);
if (!rx_pool->free_map) {
- release_rx_pools(adapter);
- return -1;
+ dev_err(dev, "Couldn't alloc free_map %d\n", i);
+ goto out_release;
}
rx_pool->rx_buff = kcalloc(rx_pool->size,
@@ -651,69 +713,58 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n");
- release_rx_pools(adapter);
- return -1;
+ goto out_release;
}
-
- if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size)) {
- release_rx_pools(adapter);
- return -1;
- }
-
- for (j = 0; j < rx_pool->size; ++j)
- rx_pool->free_map[j] = j;
-
- atomic_set(&rx_pool->available, 0);
- rx_pool->next_alloc = 0;
- rx_pool->next_free = 0;
}
- return 0;
-}
-
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_tx_pool *tx_pool)
-{
- int rc, i;
-
- rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
- if (rc)
- return rc;
-
- memset(tx_pool->tx_buff, 0,
- tx_pool->num_buffers *
- sizeof(struct ibmvnic_tx_buff));
+ adapter->prev_rx_pool_size = pool_size;
+ adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
- for (i = 0; i < tx_pool->num_buffers; i++)
- tx_pool->free_map[i] = i;
+update_ltb:
+ for (i = 0; i < num_pools; i++) {
+ rx_pool = &adapter->rx_pool[i];
+ dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
+ i, rx_pool->size, rx_pool->buff_size);
- tx_pool->consumer_index = 0;
- tx_pool->producer_index = 0;
+ if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size))
+ goto out;
- return 0;
-}
+ for (j = 0; j < rx_pool->size; ++j) {
+ struct ibmvnic_rx_buff *rx_buff;
-static int reset_tx_pools(struct ibmvnic_adapter *adapter)
-{
- int tx_scrqs;
- int i, rc;
+ rx_pool->free_map[j] = j;
- if (!adapter->tx_pool)
- return -1;
+ /* NOTE: Don't clear rx_buff->skb here - will leak
+ * memory! replenish_rx_pool() will reuse skbs or
+ * allocate as necessary.
+ */
+ rx_buff = &rx_pool->rx_buff[j];
+ rx_buff->dma = 0;
+ rx_buff->data = 0;
+ rx_buff->size = 0;
+ rx_buff->pool_index = 0;
+ }
- tx_scrqs = adapter->num_active_tx_pools;
- for (i = 0; i < tx_scrqs; i++) {
- ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
- rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
- if (rc)
- return rc;
- rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
- if (rc)
- return rc;
+ /* Mark pool "empty" so replenish_rx_pool() will
+ * update the LTB info for each buffer
+ */
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ /* replenish_rx_pool() may have called deactivate_rx_pools()
+ * on failover. Ensure pool is active now.
+ */
+ rx_pool->active = 1;
}
-
return 0;
+out_release:
+ release_rx_pools(adapter);
+out:
+ /* We failed to allocate one or more LTBs or map them on the VIOS.
+ * Hold onto the pools and any LTBs that we did allocate/map.
+ */
+ return -1;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -735,10 +786,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
free_long_term_buff(adapter, &tx_pool->long_term_buff);
}
+/**
+ * release_tx_pools() - Release any tx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
int i;
+ /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
+ * both NULL or both non-NULL. So we only need to check one.
+ */
if (!adapter->tx_pool)
return;
@@ -752,84 +812,218 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->tso_pool);
adapter->tso_pool = NULL;
adapter->num_active_tx_pools = 0;
+ adapter->prev_tx_pool_size = 0;
}
static int init_one_tx_pool(struct net_device *netdev,
struct ibmvnic_tx_pool *tx_pool,
- int num_entries, int buf_size)
+ int pool_size, int buf_size)
{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;
- tx_pool->tx_buff = kcalloc(num_entries,
+ tx_pool->tx_buff = kcalloc(pool_size,
sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL);
if (!tx_pool->tx_buff)
return -1;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- num_entries * buf_size))
- return -1;
-
- tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
- if (!tx_pool->free_map)
+ tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
+ if (!tx_pool->free_map) {
+ kfree(tx_pool->tx_buff);
+ tx_pool->tx_buff = NULL;
return -1;
+ }
- for (i = 0; i < num_entries; i++)
+ for (i = 0; i < pool_size; i++)
tx_pool->free_map[i] = i;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
- tx_pool->num_buffers = num_entries;
+ tx_pool->num_buffers = pool_size;
tx_pool->buf_size = buf_size;
return 0;
}
+/**
+ * reuse_tx_pools() - Check if the existing tx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing tx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and mtu) have not changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers,
+ * which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the tx pools can be reused, false otherwise.
+ */
+static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ u64 old_num_pools, new_num_pools;
+ u64 old_pool_size, new_pool_size;
+ u64 old_mtu, new_mtu;
+
+ if (!adapter->tx_pool)
+ return false;
+
+ old_num_pools = adapter->num_active_tx_pools;
+ new_num_pools = adapter->num_active_tx_scrqs;
+ old_pool_size = adapter->prev_tx_pool_size;
+ new_pool_size = adapter->req_tx_entries_per_subcrq;
+ old_mtu = adapter->prev_mtu;
+ new_mtu = adapter->req_mtu;
+
+ /* Require the MTU to be exactly the same to reuse pools for now */
+ if (old_mtu != new_mtu)
+ return false;
+
+ if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+ return true;
+
+ if (old_num_pools < adapter->min_tx_queues ||
+ old_num_pools > adapter->max_tx_queues ||
+ old_pool_size < adapter->min_tx_entries_per_subcrq ||
+ old_pool_size > adapter->max_tx_entries_per_subcrq)
+ return false;
+
+ return true;
+}
+
+/**
+ * init_tx_pools(): Initialize the set of transmit pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of transmit pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing tx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
static int init_tx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int tx_subcrqs;
+ struct device *dev = &adapter->vdev->dev;
+ int num_pools;
+ u64 pool_size; /* # of buffers in pool */
u64 buff_size;
- int i, rc;
+ int i, j, rc;
+
+ num_pools = adapter->req_tx_queues;
+
+ /* We must notify the VIOS about the LTB on all resets - but we only
+ * need to alloc/populate pools if either the number of buffers or
+ * size of each buffer in the pool has changed.
+ */
+ if (reuse_tx_pools(adapter)) {
+ netdev_dbg(netdev, "Reusing tx pools\n");
+ goto update_ltb;
+ }
+
+ /* Allocate/populate the pools. */
+ release_tx_pools(adapter);
- tx_subcrqs = adapter->num_active_tx_scrqs;
- adapter->tx_pool = kcalloc(tx_subcrqs,
+ pool_size = adapter->req_tx_entries_per_subcrq;
+ num_pools = adapter->num_active_tx_scrqs;
+
+ adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
return -1;
- adapter->tso_pool = kcalloc(tx_subcrqs,
+ adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+ /* To simplify release_tx_pools() ensure that ->tx_pool and
+ * ->tso_pool are either both NULL or both non-NULL.
+ */
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
return -1;
}
- adapter->num_active_tx_pools = tx_subcrqs;
+ /* Set num_active_tx_pools early. If we fail below after partial
+ * allocation, release_tx_pools() will know how many to look for.
+ */
+ adapter->num_active_tx_pools = num_pools;
+
+ buff_size = adapter->req_mtu + VLAN_HLEN;
+ buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
+
+ for (i = 0; i < num_pools; i++) {
+ dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
+ i, adapter->req_tx_entries_per_subcrq, buff_size);
- for (i = 0; i < tx_subcrqs; i++) {
- buff_size = adapter->req_mtu + VLAN_HLEN;
- buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
- adapter->req_tx_entries_per_subcrq,
- buff_size);
- if (rc) {
- release_tx_pools(adapter);
- return rc;
- }
+ pool_size, buff_size);
+ if (rc)
+ goto out_release;
rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
IBMVNIC_TSO_BUFS,
IBMVNIC_TSO_BUF_SZ);
- if (rc) {
- release_tx_pools(adapter);
- return rc;
- }
+ if (rc)
+ goto out_release;
+ }
+
+ adapter->prev_tx_pool_size = pool_size;
+ adapter->prev_mtu = adapter->req_mtu;
+
+update_ltb:
+ /* NOTE: All tx_pools have the same number of buffers (which is
+ * the same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
+ * buffers (see the calls to init_one_tx_pool() for these).
+ * For consistency, we use tx_pool->num_buffers and
+ * tso_pool->num_buffers below.
+ */
+ rc = -1;
+ for (i = 0; i < num_pools; i++) {
+ struct ibmvnic_tx_pool *tso_pool;
+ struct ibmvnic_tx_pool *tx_pool;
+ u32 ltb_size;
+
+ tx_pool = &adapter->tx_pool[i];
+ ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
+ if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
+ ltb_size))
+ goto out;
+
+ dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
+ i, tx_pool->long_term_buff.buff,
+ tx_pool->num_buffers, tx_pool->buf_size);
+
+ tx_pool->consumer_index = 0;
+ tx_pool->producer_index = 0;
+
+ for (j = 0; j < tx_pool->num_buffers; j++)
+ tx_pool->free_map[j] = j;
+
+ tso_pool = &adapter->tso_pool[i];
+ ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
+ if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
+ ltb_size))
+ goto out;
+
+ dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
+ i, tso_pool->long_term_buff.buff,
+ tso_pool->num_buffers, tso_pool->buf_size);
+
+ tso_pool->consumer_index = 0;
+ tso_pool->producer_index = 0;
+
+ for (j = 0; j < tso_pool->num_buffers; j++)
+ tso_pool->free_map[j] = j;
}
return 0;
+out_release:
+ release_tx_pools(adapter);
+out:
+ /* We failed to allocate one or more LTBs or map them on the VIOS.
+ * Hold onto the pools and any LTBs that we did allocate/map.
+ */
+ return rc;
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
@@ -1020,9 +1214,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
{
release_vpd_data(adapter);
- release_tx_pools(adapter);
- release_rx_pools(adapter);
-
release_napi(adapter);
release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
@@ -1198,8 +1389,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
return rc;
}
- adapter->map_id = 1;
-
rc = init_napi(adapter);
if (rc)
return rc;
@@ -1296,6 +1485,8 @@ static int ibmvnic_open(struct net_device *netdev)
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
goto out;
}
}
@@ -1424,9 +1615,6 @@ static void ibmvnic_cleanup(struct net_device *netdev)
ibmvnic_napi_disable(adapter);
ibmvnic_disable_irqs(adapter);
-
- clean_rx_pools(adapter);
- clean_tx_pools(adapter);
}
static int __ibmvnic_close(struct net_device *netdev)
@@ -1460,6 +1648,8 @@ static int ibmvnic_close(struct net_device *netdev)
rc = __ibmvnic_close(netdev);
ibmvnic_cleanup(netdev);
+ clean_rx_pools(adapter);
+ clean_tx_pools(adapter);
return rc;
}
@@ -2036,9 +2226,9 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
+ struct net_device *netdev = adapter->netdev;
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
- struct net_device *netdev = adapter->netdev;
int rc;
netdev_dbg(adapter->netdev,
@@ -2188,8 +2378,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
!adapter->rx_pool ||
!adapter->tso_pool ||
!adapter->tx_pool) {
- release_rx_pools(adapter);
- release_tx_pools(adapter);
release_napi(adapter);
release_vpd_data(adapter);
@@ -2198,16 +2386,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
} else {
- rc = reset_tx_pools(adapter);
+ rc = init_tx_pools(netdev);
if (rc) {
- netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+ netdev_dbg(netdev,
+ "init tx pools failed (%d)\n",
rc);
goto out;
}
- rc = reset_rx_pools(adapter);
+ rc = init_rx_pools(netdev);
if (rc) {
- netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+ netdev_dbg(netdev,
+ "init rx pools failed (%d)\n",
rc);
goto out;
}
@@ -4576,8 +4766,7 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
/* crq->change_mac_addr.mac_addr is the requested one
* crq->change_mac_addr_rsp.mac_addr is the returned valid one.
*/
- ether_addr_copy(netdev->dev_addr,
- &crq->change_mac_addr_rsp.mac_addr[0]);
+ eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
ether_addr_copy(adapter->mac_addr,
&crq->change_mac_addr_rsp.mac_addr[0]);
out:
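This hunk is part of the tree-wide conversion to eth_hw_addr_set(): netdev->dev_addr is being made const, so drivers must set the address through the helper instead of writing the bytes directly. A minimal sketch of the pattern, assuming a hypothetical example_set_mac() handler that is not part of this patch:

	/* eth_hw_addr_set() copies ETH_ALEN bytes into the netdev, replacing
	 * direct writes such as memcpy(netdev->dev_addr, ...) or
	 * ether_addr_copy(netdev->dev_addr, ...).
	 */
	#include <linux/etherdevice.h>

	static int example_set_mac(struct net_device *netdev, void *p)
	{
		struct sockaddr *addr = p;

		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;

		eth_hw_addr_set(netdev, addr->sa_data);
		return 0;
	}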
@@ -4778,9 +4967,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
return;
}
- netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
- crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
- crq->query_map_rsp.free_pages);
+ netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
+ crq->query_map_rsp.page_size,
+ __be32_to_cpu(crq->query_map_rsp.tot_pages),
+ __be32_to_cpu(crq->query_map_rsp.free_pages));
}
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
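The debug print above previously passed big-endian wire values straight to %d, which shows byte-swapped numbers on little-endian hosts. A minimal sketch of the conversion rule, with a hypothetical wire_val argument:

	/* CRQ response fields arrive big-endian; convert to host order
	 * before doing arithmetic or printing with %u.
	 */
	#include <asm/byteorder.h>

	static u32 example_to_host(__be32 wire_val)
	{
		return be32_to_cpu(wire_val);
	}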
@@ -5527,9 +5717,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->login_pending = false;
+ memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
+ /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
+ bitmap_set(adapter->map_ids, 0, 1);
ether_addr_copy(adapter->mac_addr, mac_addr_p);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmvnic_netdev_ops;
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
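With the single u8 map_id counter replaced by a bitmap, LTB map ids can be allocated and recycled individually; bit 0 is pre-set above so id 0 is never handed out. A minimal sketch of the alloc/free pattern, using hypothetical helper names (the driver does this inline in its LTB alloc/free paths):

	/* Allocate the lowest free map id, or fail when all usable ids are taken. */
	static int example_alloc_map_id(struct ibmvnic_adapter *adapter)
	{
		int id = find_first_zero_bit(adapter->map_ids, MAX_MAP_ID);

		if (id == MAX_MAP_ID)
			return -ENOSPC;
		bitmap_set(adapter->map_ids, id, 1);
		return id;
	}

	/* Recycle a map id when its long term buffer is freed. */
	static void example_free_map_id(struct ibmvnic_adapter *adapter, int id)
	{
		bitmap_clear(adapter->map_ids, id, 1);
	}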
@@ -5547,6 +5740,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->reset_done);
init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
+ adapter->prev_rx_buf_sz = 0;
+ adapter->prev_mtu = 0;
init_success = false;
do {
@@ -5647,6 +5842,8 @@ static void ibmvnic_remove(struct vio_dev *dev)
unregister_netdevice(netdev);
release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 22df602323bc..b8e42f67d897 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -827,7 +827,7 @@ struct ibmvnic_rx_buff {
struct ibmvnic_rx_pool {
struct ibmvnic_rx_buff *rx_buff;
- int size;
+ int size; /* # of buffers in the pool */
int index;
int buff_size;
atomic_t available;
@@ -967,6 +967,7 @@ struct ibmvnic_adapter {
u64 min_mtu;
u64 max_mtu;
u64 req_mtu;
+ u64 prev_mtu;
u64 max_multicast_filters;
u64 vlan_header_insertion;
u64 rx_vlan_header_insertion;
@@ -979,13 +980,18 @@ struct ibmvnic_adapter {
u64 opt_tx_entries_per_subcrq;
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
- u8 map_id;
+#define MAX_MAP_ID 255
+ DECLARE_BITMAP(map_ids, MAX_MAP_ID);
u32 num_active_rx_scrqs;
u32 num_active_rx_pools;
u32 num_active_rx_napi;
u32 num_active_tx_scrqs;
u32 num_active_tx_pools;
+
+ u32 prev_rx_pool_size;
+ u32 prev_tx_pool_size;
u32 cur_rx_buf_sz;
+ u32 prev_rx_buf_sz;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index ed8ea63bb172..0b274d8fa45b 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -313,6 +313,20 @@ config ICE
To compile this driver as a module, choose M here. The module
will be called ice.
+config ICE_SWITCHDEV
+ bool "Switchdev Support"
+ default y
+ depends on ICE && NET_SWITCHDEV
+ help
+ Switchdev support provides internal SRIOV packet steering and switching.
+
+ To enable it on a running kernel, use the devlink tool:
+ # devlink dev eswitch set pci/0000:XX:XX.X mode switchdev
+
+ Say Y here if you want to use Switchdev in the driver.
+
+ If unsure, say N.
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 09ae1939e6db..5039a2536951 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2259,7 +2259,7 @@ static int e100_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
e100_exec_cb(nic, NULL, e100_setup_iaaddr);
return 0;
@@ -2921,7 +2921,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e100_phy_init(nic);
- memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)nic->eeprom);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!eeprom_bad_csum_allow) {
netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index bed4f040face..669060a2e6aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1103,7 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e_err(probe, "EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
- memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
@@ -2209,7 +2209,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
e1000_rar_set(hw, hw->mac_addr, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 3178efd98006..c3def0ee7788 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -21,6 +21,7 @@
#include <linux/ptp_classify.h>
#include <linux/mii.h>
#include <linux/mdio.h>
+#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include "hw.h"
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index ebcb2a30add0..44e2dc8328a2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2550,7 +2550,6 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) {
- current_itr = 0;
new_itr = 4000;
goto set_itr_now;
}
@@ -4787,7 +4786,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
@@ -7590,7 +7589,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"NVM Read Error while reading MAC address\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2fb52bd6fc0e..2cca9e84e31e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -990,7 +990,7 @@ static int fm10k_set_mac(struct net_device *dev, void *p)
}
if (!err) {
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
ether_addr_copy(hw->mac.addr, addr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index adfa2768f024..b473cb7d7c57 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -300,7 +300,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
if (is_valid_ether_addr(hw->mac.perm_addr)) {
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
@@ -2045,7 +2045,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 39fb3d57c057..3d528fba754b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -435,7 +435,7 @@ static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
return !!ch->fwd;
}
-static inline u8 *i40e_channel_mac(struct i40e_channel *ch)
+static inline const u8 *i40e_channel_mac(struct i40e_channel *ch)
{
if (i40e_is_channel_macvlan(ch))
return ch->fwd->netdev->dev_addr;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e04b540cedc8..ba862131b9bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1587,7 +1587,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -13425,7 +13425,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e7e778ca074c..6f85879ba993 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
- struct xdp_buff **bi, *xdp;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- bool ok = true;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = i40e_rx_bi(rx_ring, ntu);
- do {
- xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!xdp) {
- ok = false;
- goto no_buffers;
- }
- *bi = xdp;
- dma = xsk_buff_xdp_get_dma(xdp);
+ xdp = i40e_rx_bi(rx_ring, ntu);
+
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
+
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->read.hdr_addr = 0;
rx_desc++;
- bi++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = i40e_rx_bi(rx_ring, 0);
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
-no_buffers:
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
- i40e_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ xdp = i40e_rx_bi(rx_ring, 0);
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
/**
@@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
break;
bi = *i40e_rx_bi(rx_ring, next_to_clean);
- bi->data_end = bi->data + size;
+ xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, bi);
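The batched allocator above replaces the per-buffer xsk_buff_alloc() loop: xsk_buff_alloc_batch() fills an array of xdp_buff pointers and may return fewer buffers than requested (pool exhaustion, or the request was clamped at the ring wrap), so the caller reports a partial refill by comparing the count. A condensed sketch of the contract, with hypothetical pool, xdp_array, and want names:

	/* filled is the number of buffers actually placed in xdp_array */
	u32 i, filled;

	filled = xsk_buff_alloc_batch(pool, xdp_array, want);
	if (!filled)
		return false;	/* nothing allocated; retry later */

	for (i = 0; i < filled; i++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(xdp_array[i]);
		/* post dma into the i-th Rx descriptor here */
	}
	return filled == want;	/* false signals a partial refill */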
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 68c80f04113c..e0b88ff76466 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -177,6 +177,7 @@ enum iavf_state_t {
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__IAVF_INIT_SW, /* got resources, setting up structs */
+ __IAVF_INIT_FAILED, /* init failed, restarting procedure */
__IAVF_RESETTING, /* in reset */
__IAVF_COMM_FAILED, /* communication with PF failed */
/* Below here, watchdog is running */
@@ -225,7 +226,6 @@ struct iavf_adapter {
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
- struct delayed_work init_task;
wait_queue_head_t down_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -312,6 +312,7 @@ struct iavf_adapter {
struct iavf_hw hw; /* defined in iavf_type.h */
enum iavf_state_t state;
+ enum iavf_state_t last_state;
unsigned long crit_section;
struct delayed_work watchdog_task;
@@ -393,6 +394,15 @@ struct iavf_device {
extern char iavf_driver_name[];
extern struct workqueue_struct *iavf_wq;
+static inline void iavf_change_state(struct iavf_adapter *adapter,
+ enum iavf_state_t state)
+{
+ if (adapter->state != state) {
+ adapter->last_state = adapter->state;
+ adapter->state = state;
+ }
+}
+
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
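Recording last_state in iavf_change_state() is what lets the reworked watchdog (further down in this patch) resume initialization from the exact step that failed, instead of restarting from __IAVF_STARTUP every time. A condensed sketch of the retry pattern, assuming the fields declared above:

	/* A failing init step parks the state machine... */
	iavf_change_state(adapter, __IAVF_INIT_FAILED);

	/* ...and the watchdog later bounces back to the recorded step;
	 * at this point last_state still names the step that failed.
	 */
	if (adapter->state == __IAVF_INIT_FAILED)
		iavf_change_state(adapter, adapter->last_state);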
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cada4e0e40b4..56956e449c97 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -14,7 +14,7 @@
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
-static int iavf_init_get_resources(struct iavf_adapter *adapter);
+static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);
char iavf_driver_name[] = "iavf";
@@ -960,7 +960,7 @@ static void iavf_configure(struct iavf_adapter *adapter)
**/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
- adapter->state = __IAVF_RUNNING;
+ iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_napi_enable_all(adapter);
@@ -1688,9 +1688,9 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
*
* Function process __IAVF_STARTUP driver state.
* When success the state is changed to __IAVF_INIT_VERSION_CHECK
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_startup(struct iavf_adapter *adapter)
+static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1729,9 +1729,10 @@ static int iavf_startup(struct iavf_adapter *adapter)
iavf_shutdown_adminq(hw);
goto err;
}
- adapter->state = __IAVF_INIT_VERSION_CHECK;
+ iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1740,9 +1741,9 @@ err:
*
* Function process __IAVF_INIT_VERSION_CHECK driver state.
* When success the state is changed to __IAVF_INIT_GET_RESOURCES
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_version_check(struct iavf_adapter *adapter)
+static void iavf_init_version_check(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1753,7 +1754,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
if (!iavf_asq_done(hw)) {
dev_err(&pdev->dev, "Admin queue command never completed\n");
iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
goto err;
}
@@ -1776,10 +1777,10 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
err);
goto err;
}
- adapter->state = __IAVF_INIT_GET_RESOURCES;
-
+ iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1789,9 +1790,9 @@ err:
* Function process __IAVF_INIT_GET_RESOURCES driver state and
* finishes driver initialization procedure.
* When success the state is changed to __IAVF_DOWN
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_get_resources(struct iavf_adapter *adapter)
+static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -1819,7 +1820,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
*/
iavf_shutdown_adminq(hw);
dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
- return 0;
+ return;
}
if (err) {
dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
@@ -1847,7 +1848,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
eth_hw_addr_random(netdev);
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -1893,7 +1894,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
rtnl_unlock();
@@ -1911,7 +1912,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
else
iavf_init_rss(adapter);
- return err;
+ return;
err_mem:
iavf_free_rss(adapter);
err_register:
@@ -1922,7 +1923,7 @@ err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1941,9 +1942,50 @@ static void iavf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
- adapter->state = __IAVF_COMM_FAILED;
+ iavf_change_state(adapter, __IAVF_COMM_FAILED);
+
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
+ adapter->state != __IAVF_RESETTING) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ }
switch (adapter->state) {
+ case __IAVF_STARTUP:
+ iavf_startup(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_VERSION_CHECK:
+ iavf_init_version_check(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_GET_RESOURCES:
+ iavf_init_get_resources(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_FAILED:
+ if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to communicate with PF; waiting before retry\n");
+ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
+ iavf_shutdown_adminq(hw);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, (5 * HZ));
+ return;
+ }
+ /* Try again from failed step */
+ iavf_change_state(adapter, adapter->last_state);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+ return;
case __IAVF_COMM_FAILED:
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -1952,23 +1994,19 @@ static void iavf_watchdog_task(struct work_struct *work)
/* A chance for redemption! */
dev_err(&adapter->pdev->dev,
"Hardware came out of reset. Attempting reinit.\n");
- adapter->state = __IAVF_STARTUP;
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- queue_delayed_work(iavf_wq, &adapter->init_task, 10);
- mutex_unlock(&adapter->crit_lock);
- /* Don't reschedule the watchdog, since we've restarted
- * the init task. When init_task contacts the PF and
+ /* When init task contacts the PF and
* gets everything set up again, it'll restart the
* watchdog for us. Down, boy. Sit. Stay. Woof.
*/
- return;
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
- goto watchdog_done;
+ return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
@@ -1991,38 +2029,40 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
+ if (adapter->state == __IAVF_RUNNING)
+ iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
mutex_unlock(&adapter->crit_lock);
return;
default:
- goto restart_watchdog;
+ return;
}
- /* check for hw reset */
+ /* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
queue_work(iavf_wq, &adapter->reset_task);
- goto watchdog_done;
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, HZ * 2);
+ return;
}
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
-watchdog_done:
- if (adapter->state == __IAVF_RUNNING ||
- adapter->state == __IAVF_COMM_FAILED)
- iavf_detect_recover_hung(&adapter->vsi);
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
else
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
- queue_work(iavf_wq, &adapter->adminq_task);
}
static void iavf_disable_vf(struct iavf_adapter *adapter)
@@ -2081,7 +2121,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
adapter->netdev->flags &= ~IFF_UP;
mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
@@ -2191,7 +2231,7 @@ continue_reset:
}
iavf_irq_disable(adapter);
- adapter->state = __IAVF_RESETTING;
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
/* free the Tx/Rx rings and descriptors, might be better to just
@@ -2291,11 +2331,14 @@ continue_reset:
iavf_configure(adapter);
+ /* iavf_up_complete() will switch device back
+ * to __IAVF_RUNNING
+ */
iavf_up_complete(adapter);
iavf_irq_enable(adapter, true);
} else {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
mutex_unlock(&adapter->client_lock);
@@ -2305,6 +2348,8 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
+ if (running)
+ iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
@@ -3297,7 +3342,7 @@ static int iavf_close(struct net_device *netdev)
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
iavf_down(adapter);
- adapter->state = __IAVF_DOWN_PENDING;
+ iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
@@ -3631,64 +3676,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_init_task - worker thread to perform delayed initialization
- * @work: pointer to work_struct containing our data
- *
- * This task completes the work that was begun in probe. Due to the nature
- * of VF-PF communications, we may need to wait tens of milliseconds to get
- * responses back from the PF. Rather than busy-wait in probe and bog down the
- * whole system, we'll do it in a task so we can sleep.
- * This task only runs during driver init. Once we've established
- * communications with the PF driver and set up our netdev, the watchdog
- * takes over.
- **/
-static void iavf_init_task(struct work_struct *work)
-{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- init_task.work);
- struct iavf_hw *hw = &adapter->hw;
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- return;
- }
- switch (adapter->state) {
- case __IAVF_STARTUP:
- if (iavf_startup(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_VERSION_CHECK:
- if (iavf_init_version_check(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_GET_RESOURCES:
- if (iavf_init_get_resources(adapter) < 0)
- goto init_failed;
- goto out;
- default:
- goto init_failed;
- }
-
- queue_delayed_work(iavf_wq, &adapter->init_task,
- msecs_to_jiffies(30));
- goto out;
-init_failed:
- if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
- dev_err(&adapter->pdev->dev,
- "Failed to communicate with PF; waiting before retry\n");
- adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
- iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
- goto out;
- }
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
-out:
- mutex_unlock(&adapter->crit_lock);
-}
-
-/**
* iavf_shutdown - Shutdown the device in preparation for a reboot
* @pdev: pci device structure
**/
@@ -3705,7 +3692,7 @@ static void iavf_shutdown(struct pci_dev *pdev)
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Prevent the watchdog from running. */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
mutex_unlock(&adapter->crit_lock);
@@ -3778,7 +3765,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
/* Call save state here because it relies on the adapter struct. */
pci_save_state(pdev);
@@ -3822,8 +3809,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
- queue_delayed_work(iavf_wq, &adapter->init_task,
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
@@ -3929,8 +3915,8 @@ static void iavf_remove(struct pci_dev *pdev)
int err;
/* Indicate we are in remove and not to run reset_task */
mutex_lock(&adapter->remove_lock);
- cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -3954,7 +3940,7 @@ static void iavf_remove(struct pci_dev *pdev)
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Shut down all the garbage mashers on the detention level */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 3c735968e1b8..8c3f0f77cb57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1685,7 +1685,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -1716,7 +1716,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
/* refresh current mac address if changed */
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
@@ -1735,7 +1735,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
if (adapter->state == __IAVF_DOWN_PENDING) {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
break;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 4f538cdf42c1..c36faa7d1471 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -26,10 +26,13 @@ ice-y := ice_main.o \
ice_devlink.o \
ice_fw_update.o \
ice_lag.o \
- ice_ethtool.o
+ ice_ethtool.o \
+ ice_repr.o \
+ ice_tc_lib.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 3c4f08d20414..967a90efcb11 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -38,6 +38,10 @@
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
@@ -55,6 +59,7 @@
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
+#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
@@ -63,6 +68,8 @@
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
+#include "ice_repr.h"
+#include "ice_eswitch.h"
#include "ice_lag.h"
#define ICE_BAR0 0
@@ -84,6 +91,7 @@
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
+#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -101,6 +109,10 @@
#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
+
+#define ICE_CHNL_START_TC 1
+#define ICE_CHNL_MAX_TC 16
+
#define ICE_MAX_RESET_WAIT 20
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -118,14 +130,24 @@
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
+/* Minimum BW limit is 500 Kbps for any scheduler node */
+#define ICE_MIN_BW_LIMIT 500
+/* User can specify BW in Kbit/Mbit/Gbit and the OS converts it to bytes/sec.
+ * Use this divisor to convert the user-specified BW limit into Kbps.
+ */
+#define ICE_BW_KBPS_DIVISOR 125
+
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
-/* Macros for each Tx/Rx ring in a VSI */
+/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
+#define ice_for_each_xdp_txq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)
+
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
@@ -139,6 +161,9 @@
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ice_for_each_chnl_tc(i) \
+ for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
+
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
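The divisor works because 1 Kbit is 1000 bits, i.e. 125 bytes, so dividing a bytes-per-second rate by 125 yields Kbps: a 100 Mbit/s user rate arrives from the stack as 12,500,000 bytes/s, and 12500000 / 125 = 100000 Kbps. A one-line sketch, with a hypothetical rate_bytes variable:

	/* bytes/sec -> Kbps: 1 Kbit = 125 bytes */
	u64 rate_kbps = div_u64(rate_bytes, ICE_BW_KBPS_DIVISOR);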
@@ -158,6 +183,29 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+enum ice_feature {
+ ICE_F_DSCP,
+ ICE_F_SMA_CTRL,
+ ICE_F_MAX
+};
+
+DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+
+struct ice_channel {
+ struct list_head list;
+ u8 type;
+ u16 sw_id;
+ u16 base_q;
+ u16 num_rxq;
+ u16 num_txq;
+ u16 vsi_num;
+ u8 ena_tc;
+ struct ice_aqc_vsi_props info;
+ u64 max_tx_rate;
+ u64 min_tx_rate;
+ struct ice_vsi *ch_vsi;
+};
+
struct ice_txq_meta {
u32 q_teid; /* Tx-scheduler element identifier */
u16 q_id; /* Entry in VSI's txq_map bitmap */
@@ -175,7 +223,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* Tx map */
+ u16 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -266,8 +314,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
- struct ice_ring **rx_rings; /* Rx ring array */
- struct ice_ring **tx_rings; /* Tx ring array */
+ struct ice_rx_ring **rx_rings; /* Rx ring array */
+ struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -306,10 +354,6 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
- /* devlink port data */
- struct devlink_port devlink_port;
- bool devlink_port_registered;
-
u16 max_frame;
u16 rx_buf_len;
@@ -344,11 +388,42 @@ struct ice_vsi {
u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
- struct ice_ring **xdp_rings; /* XDP ring array */
+ struct ice_tx_ring **xdp_rings; /* XDP ring array */
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct net_device **target_netdevs;
+
+ struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
+
+ /* Channel Specific Fields */
+ struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
+ u16 cnt_q_avail;
+ u16 next_base_q; /* next queue to be used for channel setup */
+ struct list_head ch_list;
+ u16 num_chnl_rxq;
+ u16 num_chnl_txq;
+ u16 ch_rss_size;
+ u16 num_chnl_fltr;
+ /* store away rss size info before configuring ADQ channels so that,
+ * it can be used after tc-qdisc delete, to get back RSS setting as
+ * they were before
+ */
+ u16 orig_rss_size;
+ /* this keeps track of all enabled TCs with and without DCB
+ * and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
+ * information
+ */
+ u8 all_numtc;
+ u16 all_enatc;
+
+ /* store away TC info, to be used for rebuild logic */
+ u8 old_numtc;
+ u16 old_ena_tc;
+
+ struct ice_channel *ch;
+
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
@@ -377,6 +452,8 @@ struct ice_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
+ struct ice_channel *ch;
+
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
@@ -395,6 +472,8 @@ enum ice_pf_flags {
ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
+ ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
+ ICE_FLAG_CLS_FLOWER,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
@@ -408,6 +487,12 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */
};
+struct ice_switchdev_info {
+ struct ice_vsi *control_vsi;
+ struct ice_vsi *uplink_vsi;
+ bool is_running;
+};
+
struct ice_agg_node {
u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE 64
@@ -421,6 +506,9 @@ struct ice_pf {
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
@@ -434,6 +522,7 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ u16 eswitch_mode; /* current mode of eswitch */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
u16 num_alloc_vfs; /* actual number of VFs allocated */
@@ -443,6 +532,7 @@ struct ice_pf {
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
@@ -495,12 +585,19 @@ struct ice_pf {
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
+ /* count of tc_flower filters specific to channel (aka where filter
+ * action is "hw_tc <tc_num>")
+ */
+ u16 num_dmac_chnl_fltrs;
+ struct hlist_head tc_flower_fltr_list;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */
+ struct ice_switchdev_info switchdev;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -512,9 +609,21 @@ struct ice_pf {
struct ice_netdev_priv {
struct ice_vsi *vsi;
+ struct ice_repr *repr;
};
/**
+ * ice_vector_ch_enabled
+ * @qv: pointer to q_vector, can be NULL
+ *
+ * This function returns true if the vector is channel enabled, false otherwise
+ */
+static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
+{
+ return !!qv->ch; /* Enable it to run with TC */
+}
+
+/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
@@ -556,25 +665,42 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline void ice_set_ring_xdp(struct ice_ring *ring)
+static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
ring->flags |= ICE_TX_FLAGS_RING_XDP;
}
/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: ring to use
+ * @ring: Rx ring to use
*
* Returns a pointer to xdp_umem structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (ice_ring_is_xdp(ring))
- qid -= vsi->num_xdp_txq;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
+}
+
+/**
+ * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Tx ring to use
+ *
+ * Returns a pointer to xdp_umem structure if there is a buffer pool present,
+ * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ */
+static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ u16 qid;
+
+ qid = ring->q_index - vsi->num_xdp_txq;
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
@@ -597,6 +723,19 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
}
/**
+ * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
+ * @np: private netdev structure
+ */
+static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
+{
+ /* In case of port representor return source port VSI. */
+ if (np->repr)
+ return np->repr->src_vsi;
+ else
+ return np->vsi;
+}
+
+/**
* ice_get_ctrl_vsi - Get the control VSI
* @pf: PF instance
*/
@@ -610,6 +749,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
}
/**
+ * ice_is_switchdev_running - check if switchdev is configured
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
+ * and switchdev is configured, false otherwise.
+ */
+static inline bool ice_is_switchdev_running(struct ice_pf *pf)
+{
+ return pf->switchdev.is_running;
+}
+
+/**
* ice_set_sriov_cap - enable SRIOV in PF flags
* @pf: PF struct
*/
@@ -633,11 +784,37 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+/**
+ * ice_is_adq_active - any active ADQs
+ * @pf: pointer to PF
+ *
+ * This function returns true if any ADQs are configured, as determined by
+ * the VSI type (which should be VSI_PF), numtc, and the TC_MQPRIO flag;
+ * otherwise it returns false
+ */
+static inline bool ice_is_adq_active(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ /* is ADQ configured */
+ if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return true;
+
+ return false;
+}
+
bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
+int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
@@ -648,6 +825,7 @@ int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 21b4c7cd6f05..a5425f0dce3f 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -233,6 +233,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_RECIPE 0x05
#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
@@ -241,6 +242,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
@@ -474,6 +476,53 @@ struct ice_aqc_vsi_props {
#define ICE_MAX_NUM_RECIPES 64
+/* Add/Get Recipe (indirect 0x0290/0x0292) */
+struct ice_aqc_add_get_recipe {
+ __le16 num_sub_recipes; /* Input in Add cmd, Output in Get cmd */
+ __le16 return_index; /* Input, used for Get cmd only */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_recipe_content {
+ u8 rid;
+#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
+#define ICE_AQ_SW_ID_LKUP_IDX 0
+ u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
+#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
+ __le16 mask[5];
+ u8 result_indx;
+#define ICE_AQ_RECIPE_RESULT_DATA_S 0
+#define ICE_AQ_RECIPE_RESULT_DATA_M (0x3F << ICE_AQ_RECIPE_RESULT_DATA_S)
+#define ICE_AQ_RECIPE_RESULT_EN BIT(7)
+ u8 rsvd0[3];
+ u8 act_ctrl_join_priority;
+ u8 act_ctrl_fwd_priority;
+ u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+ u8 rsvd1;
+ __le32 dflt_act;
+};
+
+struct ice_aqc_recipe_data_elem {
+ u8 recipe_indx;
+ u8 resp_bits;
+ u8 rsvd0[2];
+ u8 recipe_bitmap[8];
+ u8 rsvd1[4];
+ struct ice_aqc_recipe_content content;
+ u8 rsvd2[20];
+};
+
+/* Set/Get Recipes to Profile Association (direct 0x0291/0x0293) */
+struct ice_aqc_recipe_to_profile {
+ __le16 profile_id;
+ u8 rsvd[6];
+ DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+};
+
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
struct ice_aqc_sw_rules {
@@ -671,6 +720,16 @@ struct ice_aqc_sw_rules_elem {
} __packed pdata;
};
+/* Query PFC Mode (direct 0x0302)
+ * Set PFC Mode (direct 0x0303)
+ */
+struct ice_aqc_set_query_pfc_mode {
+ u8 pfc_mode;
+/* For Query Command response, reserved in all other cases */
+#define ICE_AQC_PFC_VLAN_BASED_PFC 1
+#define ICE_AQC_PFC_DSCP_BASED_PFC 2
+ u8 rsvd[15];
+};
/* Get Default Topology (indirect 0x0400) */
struct ice_aqc_get_topo {
u8 port_num;
@@ -1220,7 +1279,7 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
-struct ice_aqc_link_topo_addr {
+struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
@@ -1246,6 +1305,10 @@ struct ice_aqc_link_topo_addr {
#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
u8 index;
+};
+
+struct ice_aqc_link_topo_addr {
+ struct ice_aqc_link_topo_params topo_params;
__le16 handle;
#define ICE_AQC_LINK_TOPO_HANDLE_S 0
#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
@@ -1268,6 +1331,7 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
u8 rsvd[9];
};
@@ -1281,6 +1345,16 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ice_aqc_gpio {
+ __le16 gpio_ctrl_handle;
+#define ICE_AQC_GPIO_HANDLE_S 0
+#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
/* Read/Write SFF EEPROM command (indirect 0x06EE) */
struct ice_aqc_sff_eeprom {
u8 lport_num;
@@ -1922,10 +1996,13 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
+ struct ice_aqc_add_get_recipe add_get_recipe;
+ struct ice_aqc_recipe_to_profile recipe_to_profile;
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
@@ -1936,6 +2013,7 @@ struct ice_aq_desc {
struct ice_aqc_nvm_pkg_data pkg_data;
struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
struct ice_aqc_lldp_stop lldp_stop;
@@ -2033,6 +2111,12 @@ enum ice_adminq_opc {
ice_aqc_opc_update_vsi = 0x0211,
ice_aqc_opc_free_vsi = 0x0213,
+ /* recipe commands */
+ ice_aqc_opc_add_recipe = 0x0290,
+ ice_aqc_opc_recipe_to_profile = 0x0291,
+ ice_aqc_opc_get_recipe = 0x0292,
+ ice_aqc_opc_get_recipe_to_profile = 0x0293,
+
/* switch rules population commands */
ice_aqc_opc_add_sw_rules = 0x02A0,
ice_aqc_opc_update_sw_rules = 0x02A1,
@@ -2040,6 +2124,10 @@ enum ice_adminq_opc {
ice_aqc_opc_clear_pf_cfg = 0x02A4,
+ /* DCB commands */
+ ice_aqc_opc_query_pfc_mode = 0x0302,
+ ice_aqc_opc_set_pfc_mode = 0x0303,
+
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
@@ -2064,6 +2152,8 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_set_gpio = 0x06EC,
+ ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 88d98c9e5f91..5daade32ea62 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -513,7 +513,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
if (!vsi || vsi->type != ICE_VSI_PF)
return;
- arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
+ arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
GFP_KERNEL);
if (!arfs_fltr_list)
return;
@@ -614,7 +614,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
return -EINVAL;
base_idx = vsi->base_vector;
- for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_for_each_q_vector(vsi, i)
if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
pf->msix_entries[base_idx + i].vector)) {
ice_free_cpu_rx_rmap(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c36057efc7ae..fa6cd63cbf1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
q_vector->tx.itr_mode = ITR_DYNAMIC;
q_vector->rx.itr_mode = ITR_DYNAMIC;
+ q_vector->tx.type = ICE_TX_CONTAINER;
+ q_vector->rx.type = ICE_RX_CONTAINER;
if (vsi->type == ICE_VSI_VF)
goto out;
@@ -146,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
dev = ice_pf_to_dev(pf);
@@ -156,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ tx_ring->q_vector = NULL;
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ rx_ring->q_vector = NULL;
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -201,15 +204,18 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
}
/**
- * ice_calc_q_handle - calculate the queue handle
+ * ice_calc_txq_handle - calculate the queue handle
* @vsi: VSI that ring belongs to
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
+ if (ring->ch)
+ return ring->q_index - ring->ch->base_q;
+
/* Idea here for the calculation is that we subtract the queue count of
 * the TC that the ring belongs to from its absolute queue index, and as
 * a result we get the queue's index within that TC.
@@ -218,13 +224,37 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
}
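For illustration, the channel branch added above, with hypothetical values:

	/*
	 * A channel ring with ring->q_index = 10 and ring->ch->base_q = 8
	 * gets q_handle = 10 - 8 = 2, i.e. its position within the channel.
	 */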
/**
+ * ice_eswitch_calc_txq_handle
+ * @ring: pointer to the ring for which a unique index is needed
+ *
+ * To work correctly with many netdevs, ring->q_index of Tx rings on a
+ * switchdev VSI can repeat. Hardware ring setup requires a unique q_index,
+ * so calculate it here by finding this ring's index in vsi->tx_rings.
+ *
+ * Returns ICE_INVAL_Q_INDEX when the index isn't found. This should never
+ * happen, since the VSI is taken from ring->vsi, so the ring has to be
+ * present in that VSI.
+ */
+static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ int i;
+
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i] == ring)
+ return i;
+ }
+
+ return ICE_INVAL_Q_INDEX;
+}
+
+/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
* This enables/disables XPS for a given Tx descriptor ring
* based on the TCs enabled for the VSI that ring belongs to.
*/
-static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
if (!ring->q_vector || !ring->netdev)
return;
@@ -246,7 +276,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
* Configure the Tx descriptor ring in TLAN context.
*/
static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
struct ice_vsi *vsi = ring->vsi;
struct ice_hw *hw = &vsi->back->hw;
@@ -258,7 +288,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
- ice_set_cgd_num(tlan_ctx, ring);
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -273,19 +303,28 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
case ICE_VSI_LB:
case ICE_VSI_CTRL:
case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ if (ring->ch)
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ else
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
/* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+ if (ring->ch)
+ tlan_ctx->src_vsi = ring->ch->vsi_num;
+ else
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
/* Restrict Tx timestamps to the PF VSI */
switch (vsi->type) {
@@ -312,7 +351,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*
* Returns the offset value for ring into the data buffer.
*/
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
@@ -328,7 +367,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
*
* Configure the Rx descriptor ring in RLAN context.
*/
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
@@ -439,7 +478,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_ring *ring)
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u16 num_bufs = ICE_DESC_UNUSED(ring);
@@ -660,16 +699,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
+ tx_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = tx_ring;
}
tx_rings_rem -= tx_rings_per_v;
@@ -677,16 +716,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
+ q_vector->rx.rx_ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
+ rx_ring->next = q_vector->rx.rx_ring;
+ q_vector->rx.rx_ring = rx_ring;
}
rx_rings_rem -= rx_rings_per_v;
}
@@ -711,12 +750,13 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @qg_buf: queue group buffer
*/
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_aqc_add_txqs_perq *txq;
+ struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -746,10 +786,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+ if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ring->q_handle = ice_eswitch_calc_txq_handle(ring);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
+ if (ring->q_handle == ICE_INVAL_Q_INDEX)
+ return -ENODEV;
+ } else {
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
+ }
+
+ if (ch)
+ status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
+ else
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
ice_stat_str(status));
@@ -870,7 +923,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
*/
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_pf *pf = vsi->back;
@@ -927,9 +980,10 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
+ struct ice_channel *ch = ring->ch;
u8 tc;
if (IS_ENABLED(CONFIG_DCB))
@@ -940,6 +994,11 @@ ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
txq_meta->q_id = ring->reg_idx;
txq_meta->q_teid = ring->txq_teid;
txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
+ if (ch) {
+ txq_meta->vsi_idx = ch->ch_vsi->idx;
+ txq_meta->tc = 0;
+ } else {
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+ }
}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 20e1c29aa68a..b67dca417acb 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
@@ -25,9 +25,9 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index df5ad4de1f00..b3066d0fea8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -72,6 +72,27 @@ bool ice_is_e810(struct ice_hw *hw)
}
/**
+ * ice_is_e810t
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E810T based, false if not.
+ */
+bool ice_is_e810t(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
+ hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -242,11 +263,13 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
- ICE_AQC_LINK_TOPO_NODE_CTX_S);
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
/* set node type */
- cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+ cmd->addr.topo_params.node_type_ctx |=
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
@@ -570,6 +593,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
return ICE_ERR_NO_MEMORY;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
+ sw->prof_res_bm_init = 0;
status = ice_init_def_sw_recp(hw);
if (status) {
@@ -596,17 +620,42 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
list_del(&v_pos_map->list_entry);
devm_kfree(ice_hw_to_dev(hw), v_pos_map);
}
- recps = hw->switch_info->recp_list;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
- struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+ recps = sw->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
recps[i].root_rid = i;
- mutex_destroy(&recps[i].filt_rule_lock);
- list_for_each_entry_safe(lst_itr, tmp_entry,
- &recps[i].filt_rules, list_entry) {
- list_del(&lst_itr->list_entry);
- devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ list_for_each_entry_safe(rg_entry, tmprg_entry,
+ &recps[i].rg_list, l_entry) {
+ list_del(&rg_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), rg_entry);
+ }
+
+ if (recps[i].adv_rule) {
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+ } else {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
}
+ if (recps[i].root_buf)
+ devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -4769,6 +4818,64 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
}
/**
+ * ice_aq_set_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: SW-provided IO value to set in the LSB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
+ */
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+}
+
+/**
+ * ice_aq_get_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: IO value read
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
+ * the topology
+ */
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ *value = !!cmd->gpio_val;
+ return 0;
+}
+
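A minimal usage sketch of the two helpers above; the controller handle and pin index are hypothetical and error handling is trimmed:

	bool level;
	int err;

	/* drive a topology GPIO high, then read the level back */
	err = ice_aq_set_gpio(hw, gpio_ctrl_handle, /* pin_idx */ 2, true, NULL);
	if (!err)
		err = ice_aq_get_gpio(hw, gpio_ctrl_handle, 2, &level, NULL);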
+/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
*
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index fb16070f02e2..65c1b3244264 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -183,6 +183,7 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
+bool ice_is_e810t(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -192,6 +193,12 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 *value, struct ice_sq_cd *cd);
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd);
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 849fcf605479..241427cd9bc0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
+#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -736,6 +737,45 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
}
/**
+ * ice_aq_set_pfc_mode - Set PFC mode
+ * @hw: pointer to the HW struct
+ * @pfc_mode: value of PFC mode to set
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ call configures the PFC mode to DSCP-based PFC or
+ * VLAN-based PFC (0x0303)
+ */
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
+ return -EINVAL;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
+
+ cmd->pfc_mode = pfc_mode;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
+ * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
+ * been executed, check if cmd->pfc_mode is what was requested. If not,
+ * return an error.
+ */
+ if (cmd->pfc_mode != pfc_mode)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
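A hedged example of calling the helper above: request DSCP-based PFC and treat -EOPNOTSUPP as firmware refusing the switch, per the write-back check:

	int err;

	err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC, NULL);
	if (err == -EOPNOTSUPP)
		/* FW wrote back a different mode, e.g. DCB is disabled */
		dev_dbg(ice_pf_to_dev(pf), "DSCP PFC mode not applied\n");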
+/**
* ice_cee_to_dcb_cfg
* @cee_cfg: pointer to CEE configuration struct
* @pi: port information structure
@@ -1207,7 +1247,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
}
/**
- * ice_add_dcb_tlv - Add all IEEE TLVs
+ * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of data to convert to TLV
+ */
+static void
+ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_UP_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_DSCP2UP);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* bytes 0 - 63 - IPv4 DSCP2UP LUT */
+ for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ /* IPv4 mapping */
+ buf[i] = dcbcfg->dscp_map[i];
+ /* IPv6 mapping */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i];
+ }
+
+ /* byte 64 - IPv4 untagged traffic */
+ buf[i] = 0;
+
+ /* byte 144 - IPv6 untagged traffic */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = 0;
+}
+
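The resulting TLV value layout, as built by the loop above (IPv6 entries sit at ICE_DSCP_IPV6_OFFSET = 80):

	/*
	 * DSCP2UP TLV value layout:
	 *   bytes   0..63   IPv4 DSCP -> UP map
	 *   byte       64   IPv4 untagged traffic UP (0)
	 *   bytes  80..143  IPv6 DSCP -> UP map
	 *   byte      144   IPv6 untagged traffic UP (0)
	 */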
+#define ICE_BYTES_PER_TC 8
+/**
+ * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV
+ * @tlv: location to build the TLV data
+ */
+static void
+ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_ENF_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_ENFORCE);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Allow all DSCP values to be valid for all TC's (IPv4 and IPv6) */
+ memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC));
+}
+
+/**
+ * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of the data to convert to TLV
+ */
+static void
+ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u8 offset = 0;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_TC_BW_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_TCBW);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ * First Octet after subtype
+ * ----------------------------
+ * | RSV | CBS | RSV | Max TCs |
+ * | 1b | 1b | 3b | 3b |
+ * ----------------------------
+ */
+ etscfg = &dcbcfg->etscfg;
+ buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+
+ /* bytes 1 - 4 reserved */
+ offset = 5;
+
+ /* TC BW table
+ * bytes 0 - 7 for TC 0 - 7
+ *
+ * TSA Assignment table
+ * bytes 8 - 15 for TC 0 - 7
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ buf[offset] = etscfg->tcbwtable[i];
+ buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ */
+static void
+ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_PFC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf[0] = dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena & 0xF;
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs
* @tlv: Fill TLV data in IEEE format
* @dcbcfg: Local store which holds the DCB Config
* @tlvid: Type of IEEE TLV
@@ -1218,21 +1391,41 @@ static void
ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
u16 tlvid)
{
- switch (tlvid) {
- case ICE_IEEE_TLV_ID_ETS_CFG:
- ice_add_ieee_ets_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_ETS_REC:
- ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_PFC_CFG:
- ice_add_ieee_pfc_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_APP_PRI:
- ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
- break;
- default:
- break;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* pfc_mode == ICE_QOS_MODE_DSCP */
+ switch (tlvid) {
+ case ICE_TLV_ID_DSCP_UP:
+ ice_add_dscp_up_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_ENF:
+ ice_add_dscp_enf_tlv(tlv);
+ break;
+ case ICE_TLV_ID_DSCP_TC_BW:
+ ice_add_dscp_tc_bw_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_TO_PFC:
+ ice_add_dscp_pfc_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index d7e5e6178a21..9b6f87a889a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -22,6 +22,14 @@
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
+
+#define ICE_DSCP_OUI 0xFFFFFF
+#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41
+#define ICE_DSCP_SUBTYPE_ENFORCE 0x42
+#define ICE_DSCP_SUBTYPE_TCBW 0x43
+#define ICE_DSCP_SUBTYPE_PFC 0x44
+#define ICE_DSCP_IPV6_OFFSET 80
+
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
@@ -78,11 +86,20 @@
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+#define ICE_TLV_ID_DSCP_UP 3
+#define ICE_TLV_ID_DSCP_ENF 4
+#define ICE_TLV_ID_DSCP_TC_BW 5
+#define ICE_TLV_ID_DSCP_TO_PFC 6
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
+#define ICE_DSCP_UP_TLV_LEN 148
+#define ICE_DSCP_ENF_TLV_LEN 132
+#define ICE_DSCP_TC_BW_TLV_LEN 25
+#define ICE_DSCP_PFC_TLV_LEN 6
+
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
@@ -120,6 +137,7 @@ struct ice_cee_app_prio {
u8 prio_map;
} __packed;
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 926cf748c5ec..a72e18320a22 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -5,52 +5,10 @@
#include "ice_dcb_nl.h"
/**
- * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
- * @vsi: the VSI being configured
- * @ena_tc: TC map to be enabled
- */
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
-{
- struct net_device *netdev = vsi->netdev;
- struct ice_pf *pf = vsi->back;
- struct ice_dcbx_cfg *dcbcfg;
- u8 netdev_tc;
- int i;
-
- if (!netdev)
- return;
-
- if (!ena_tc) {
- netdev_reset_tc(netdev);
- return;
- }
-
- if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
- return;
-
- dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
-
- ice_for_each_traffic_class(i)
- if (vsi->tc_cfg.ena_tc & BIT(i))
- netdev_set_tc_queue(netdev,
- vsi->tc_cfg.tc_info[i].netdev_tc,
- vsi->tc_cfg.tc_info[i].qcount_tx,
- vsi->tc_cfg.tc_info[i].qoffset);
-
- for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- u8 ets_tc = dcbcfg->etscfg.prio_table[i];
-
- /* Get the mapped netdev TC# for the UP */
- netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
- netdev_set_prio_tc_map(netdev, i, netdev_tc);
- }
-}
-
-/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
+static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
u8 i, num_tc, ena_tc = 1;
@@ -179,6 +137,67 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_get_first_droptc - returns the index of the first drop TC
+ * @vsi: VSI used to find the first drop TC
+ *
+ * This function returns the value of the first drop TC.
+ * When DCB is enabled, the first drop TC is derived from the enabled_tc and
+ * PFC-enabled bits; otherwise this function returns 0, as there is only one
+ * TC without DCB (TC0).
+ */
+static u8 ice_get_first_droptc(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ u8 num_tc, ena_tc_map, pfc_ena_map;
+ u8 i;
+
+ num_tc = ice_dcb_get_num_tc(cfg);
+
+ /* get bitmap of enabled TCs */
+ ena_tc_map = ice_dcb_get_ena_tc(cfg);
+
+ /* get bitmap of PFC enabled TCs */
+ pfc_ena_map = cfg->pfc.pfcena;
+
+ /* get first TC that is not PFC enabled */
+ for (i = 0; i < num_tc; i++) {
+ if ((ena_tc_map & BIT(i)) && (!(pfc_ena_map & BIT(i)))) {
+ dev_dbg(dev, "first drop tc = %d\n", i);
+ return i;
+ }
+ }
+
+ dev_dbg(dev, "first drop tc = 0\n");
+ return 0;
+}
+
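A worked example of the scan above, with hypothetical bitmaps:

	/*
	 * ena_tc_map  = 0x07 -> TC0..TC2 enabled
	 * pfc_ena_map = 0x03 -> PFC enabled on TC0 and TC1
	 * The first i with (ena_tc_map & BIT(i)) && !(pfc_ena_map & BIT(i))
	 * is i = 2, so the first drop TC is 2.
	 */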
+/**
+ * ice_vsi_set_dcb_tc_cfg - Set VSI's TC based on DCB configuration
+ * @vsi: pointer to the VSI instance
+ */
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ break;
+ case ICE_VSI_CHNL:
+ vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
+ vsi->tc_cfg.numtc = 1;
+ break;
+ case ICE_VSI_CTRL:
+ case ICE_VSI_LB:
+ default:
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ }
+}
+
+/**
* ice_dcb_get_tc - Get the TC associated with the queue
* @vsi: ptr to the VSI
* @queue_index: queue number associated with VSI
@@ -194,17 +213,18 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
*/
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
- struct ice_ring *tx_ring, *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 qoffset, qcount;
int i, n;
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
- for (i = 0; i < vsi->num_txq; i++) {
+ ice_for_each_txq(vsi, i) {
tx_ring = vsi->tx_rings[i];
tx_ring->dcb_tc = 0;
}
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
rx_ring = vsi->rx_rings[i];
rx_ring->dcb_tc = 0;
}
@@ -217,11 +237,68 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
qoffset = vsi->tc_cfg.tc_info[n].qoffset;
qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
- for (i = qoffset; i < (qoffset + qcount); i++) {
- tx_ring = vsi->tx_rings[i];
- rx_ring = vsi->rx_rings[i];
- tx_ring->dcb_tc = n;
- rx_ring->dcb_tc = n;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->tx_rings[i]->dcb_tc = n;
+
+ qcount = vsi->tc_cfg.tc_info[n].qcount_rx;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->rx_rings[i]->dcb_tc = n;
+ }
+ /* applicable only if "all_enatc" is set, which will be set from
+ * the setup_tc method as part of configuring channels
+ */
+ if (vsi->all_enatc) {
+ u8 first_droptc = ice_get_first_droptc(vsi);
+
+ /* When DCB is configured, TC for ADQ queues (which are really
+ * PF queues) should be the first drop TC of the main VSI
+ */
+ ice_for_each_chnl_tc(n) {
+ if (!(vsi->all_enatc & BIT(n)))
+ break;
+
+ qoffset = vsi->mqprio_qopt.qopt.offset[n];
+ qcount = vsi->mqprio_qopt.qopt.count[n];
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ vsi->tx_rings[i]->dcb_tc = first_droptc;
+ vsi->rx_rings[i]->dcb_tc = first_droptc;
+ }
+ }
+ }
+}
+
+/**
+ * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig
+ * @pf: pointer to the PF instance
+ * @ena: true to enable VSIs, false to disable
+ * @locked: true if caller holds RTNL lock, false otherwise
+ *
+ * Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV
+ * and CHNL need to be brought down. Following completion of the DCB
+ * configuration, the VSIs that were brought down need to be brought up again.
+ * This helper function does both.
+ */
+static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (!vsi)
+ continue;
+
+ switch (vsi->type) {
+ case ICE_VSI_CHNL:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_PF:
+ if (ena)
+ ice_ena_vsi(vsi, locked);
+ else
+ ice_dis_vsi(vsi, locked);
+ break;
+ default:
+ continue;
}
}
}
@@ -330,7 +407,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
*/
if (!locked)
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
@@ -358,7 +437,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ice_pf_dcb_recfg(pf);
out:
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
if (!locked)
rtnl_unlock();
free_cfg:
@@ -544,7 +624,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
* @ets_willing: configure ETS willing
* @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -673,6 +753,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ice_dcb_noncontig_cfg(pf);
}
+ } else if (vsi->type == ICE_VSI_CHNL) {
+ tc_map = BIT(ice_get_first_droptc(vsi));
} else {
tc_map = ICE_DFLT_TRAFFIC_CLASS;
}
@@ -683,6 +765,12 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
vsi->idx);
continue;
}
+ /* no need to proceed with remaining cfg if it is CHNL
+ * or switchdev VSI
+ */
+ if (vsi->type == ICE_VSI_CHNL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ continue;
ice_vsi_map_rings_to_vectors(vsi);
if (vsi->type == ICE_VSI_PF)
@@ -726,6 +814,11 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (err)
+ dev_info(dev, "Failed to set VLAN PFC mode\n");
+
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
dev_err(dev, "Failed to set local DCB config %d\n",
@@ -814,7 +907,7 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* tag will already be configured with the correct ID and priority bits
*/
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -851,7 +944,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
- struct ice_vsi *pf_vsi;
u8 mib_type;
int ret;
@@ -927,14 +1019,9 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
- pf_vsi = ice_get_main_vsi(pf);
- if (!pf_vsi) {
- dev_dbg(dev, "PF VSI doesn't exist\n");
- goto out;
- }
-
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -945,7 +1032,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
unlock_rtnl:
rtnl_unlock();
out:
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 261b6e2ed7bc..4c421c842a13 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -15,7 +15,7 @@
#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
@@ -28,13 +28,11 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
-
/**
* ice_find_q_in_range
* @low: start of queue range for a TC i.e. offset of TC
@@ -49,9 +47,9 @@ static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
}
static inline void
-ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc)
{
- tlan_ctx->cgd_num = ring->dcb_tc;
+ tlan_ctx->cgd_num = dcb_tc;
}
static inline bool ice_is_dcb_active(struct ice_pf *pf)
@@ -59,9 +57,21 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf)
return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
test_bit(ICE_FLAG_DCB_ENA, pf->flags));
}
+
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode;
+}
+
#else
static inline void ice_dcb_rebuild(struct ice_pf *pf) { }
+static inline void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+}
+
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return ICE_DFLT_TRAFFIC_CLASS;
@@ -95,7 +105,7 @@ ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
}
static inline int
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
{
return 0;
@@ -113,12 +123,16 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
return false;
}
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return 0;
+}
+
static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
-static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 4180f1f35fb8..7fdeb411b6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -64,7 +64,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcbx_cfg *new_cfg;
int bwcfg = 0, bwrec = 0;
- int err, i, max_tc = 0;
+ int err, i;
if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -80,13 +80,14 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
bwcfg += ets->tc_tx_bw[i];
new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
- new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
- if (ets->prio_tc[i] > max_tc)
- max_tc = ets->prio_tc[i];
+ if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ /* in DSCP mode up->tc mapping cannot change */
+ new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
+ new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
+ }
new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
bwrec += ets->tc_reco_bw[i];
new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
- new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
if (ice_dcb_bwchk(pf, new_cfg)) {
@@ -94,12 +95,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
goto ets_out;
}
- max_tc = pf->hw.func_caps.common_cap.maxtc;
-
- new_cfg->etscfg.maxtcs = max_tc;
-
- if (!bwcfg)
- new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
if (!bwrec)
new_cfg->etsrec.tcbwtable[0] = 100;
@@ -173,10 +169,13 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
pf->dcbx_cap = mode;
qos_cfg = &pf->hw.port_info->qos_cfg;
- if (mode & DCB_CAP_DCBX_VER_CEE)
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+ return ICE_DCB_NO_HW_CHG;
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
- else
+ } else {
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+ }
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
@@ -683,6 +682,8 @@ ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
return false;
}
+#define ICE_BYTES_PER_DSCP_VAL 8
+
/**
* ice_dcbnl_setapp - set local IEEE App config
* @netdev: relevant netdev struct
@@ -693,42 +694,117 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcb_app_priority_table new_app;
struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ u8 max_tc;
int ret;
- if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
- !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ /* ONLY DSCP APP TLVs have operational significance */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
return -EINVAL;
- mutex_lock(&pf->tc_mutex);
+ /* only allow APP TLVs in SW Mode */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n");
+ return -EINVAL;
+ }
- new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
- old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (!ice_is_feature_supported(pf, ICE_F_DSCP))
+ return -EOPNOTSUPP;
- if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
- ret = -EINVAL;
- goto setapp_out;
+ if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ netdev_err(netdev, "DSCP value 0x%04X out of range\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ max_tc = pf->hw.func_caps.common_cap.maxtc;
+ if (app->priority >= max_tc) {
+ netdev_err(netdev, "TC %d out of range, max TC %d\n",
+ app->priority, max_tc);
+ return -EINVAL;
}
+ /* grab TC mutex */
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
ret = dcb_ieee_setapp(netdev, app);
if (ret)
goto setapp_out;
+ if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) {
+ netdev_err(netdev, "DSCP value 0x%04X already user mapped\n",
+ app->protocol);
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ netdev_err(netdev, "Failed to delete re-mapping TLV\n");
+ ret = -EINVAL;
+ goto setapp_out;
+ }
+
new_app.selector = app->selector;
new_app.prot_id = app->protocol;
new_app.priority = app->priority;
- if (ice_dcbnl_find_app(old_cfg, &new_app)) {
- ret = 0;
- goto setapp_out;
- }
+ /* If port is not in DSCP mode, need to set */
+ if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ int i, j;
+
+ /* set DSCP mode */
+ ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_err(netdev, "Failed to set DSCP PFC mode %d\n",
+ ret);
+ goto setapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L3 DSCP mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_DSCP;
+
+ /* set default DSCP QoS values */
+ new_cfg->etscfg.willing = 0;
+ new_cfg->pfc.pfccap = max_tc;
+ new_cfg->pfc.willing = 0;
+
+ for (i = 0; i < max_tc; i++)
+ for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) {
+ int dscp, offset;
+
+ dscp = (i * max_tc) + j;
+ offset = max_tc * ICE_BYTES_PER_DSCP_VAL;
+
+ new_cfg->dscp_map[dscp] = i;
+ /* if less than 8 TCs are supported */
+ if (max_tc < ICE_MAX_TRAFFIC_CLASS)
+ new_cfg->dscp_map[dscp + offset] = i;
+ }
+
+ new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[0] = 0;
+
+ for (i = 1; i < max_tc; i++) {
+ new_cfg->etscfg.tcbwtable[i] = 0;
+ new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[i] = i;
+ }
+ } /* end of switching to DSCP mode */
+
+ /* apply new mapping for this DSCP value */
+ new_cfg->dscp_map[app->protocol] = app->priority;
new_cfg->app[new_cfg->numapps++] = new_app;
+
ret = ice_pf_dcb_cfg(pf, new_cfg, true);
/* return of zero indicates new cfg applied */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
- if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ else
+ ret = ICE_DCB_NO_HW_CHG;
setapp_out:
mutex_unlock(&pf->tc_mutex);
@@ -749,22 +825,21 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
unsigned int i, j;
int ret = 0;
- if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
return -EINVAL;
+ }
mutex_lock(&pf->tc_mutex);
old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- if (old_cfg->numapps <= 1)
- goto delapp_out;
-
ret = dcb_ieee_delapp(netdev, app);
if (ret)
goto delapp_out;
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
- for (i = 1; i < new_cfg->numapps; i++) {
+ for (i = 0; i < new_cfg->numapps; i++) {
if (app->selector == new_cfg->app[i].selector &&
app->protocol == new_cfg->app[i].prot_id &&
app->priority == new_cfg->app[i].priority) {
@@ -784,17 +859,58 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
new_cfg->numapps--;
for (j = i; j < new_cfg->numapps; j++) {
- new_cfg->app[i].selector = old_cfg->app[j + 1].selector;
- new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id;
- new_cfg->app[i].priority = old_cfg->app[j + 1].priority;
+ new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
+ new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
+ new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
}
- ret = ice_pf_dcb_cfg(pf, new_cfg, true);
- /* return of zero indicates new cfg applied */
+ /* if not a DSCP APP TLV or DSCP is not supported, we are done */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ !ice_is_feature_supported(pf, ICE_F_DSCP)) {
+ ret = ICE_DCB_HW_CHG;
+ goto delapp_out;
+ }
+
+ /* if DSCP TLV, then need to address change in mapping */
+ clear_bit(app->protocol, new_cfg->dscp_mapped);
+ /* remap this DSCP value to default value */
+ new_cfg->dscp_map[app->protocol] = app->protocol %
+ ICE_BYTES_PER_DSCP_VAL;
+
+ /* if the last DSCP mapping just got deleted, need to switch
+ * to L2 VLAN QoS mode
+ */
+ if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
+ ret = ice_aq_set_pfc_mode(&pf->hw,
+ ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "Failed to set VLAN PFC mode %d\n",
+ ret);
+ goto delapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L2 VLAN mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_VLAN;
+
+ ret = ice_dcb_sw_dflt_cfg(pf, true, true);
+ } else {
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ }
+
+ /* return of ICE_DCB_HW_CHG_RST indicates new cfg applied
+ * and reset needs to be performed
+ */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
+
+ /* if the change was not significant enough to actually call
+ * the reconfiguration flow, we still need to tell the caller that
+ * their request was successfully handled
+ */
if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ ret = ICE_DCB_HW_CHG;
delapp_out:
mutex_unlock(&pf->tc_mutex);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index ef4392e6e244..61dd2f18dee8 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -21,6 +21,8 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+#define ICE_SUBDEV_ID_E810T 0x000E
+#define ICE_SUBDEV_ID_E810T2 0x000F
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index da7288bdc9a3..1fb754b1355b 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
+#include "ice_eswitch.h"
#include "ice_fw_update.h"
/* context for devlink info version reporting */
@@ -22,7 +23,7 @@ struct ice_info_ctx {
*
* If a version does not exist, for example when attempting to get the
* inactive version of flash when there is no pending update, the function
- * should leave the buffer in the ctx structure empty and return 0.
+ * should leave the buffer in the ctx structure empty.
*/
static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -35,7 +36,7 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}
-static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -45,21 +46,17 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
/* We failed to locate the PBA, so just skip this entry */
dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
ice_stat_str(status));
-
- return 0;
}
-static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
- hw->fw_patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}
-static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
@@ -69,125 +66,109 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
- return 0;
}
-static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
-
- return 0;
}
-static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &pf->hw.flash.orom;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
}
-static int
-ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &ctx->pending_orom;
if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
orom->major, orom->build, orom->patch);
-
- return 0;
}
-static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
}
-static int
-ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
+ nvm->major, nvm->minor);
}
-static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int
-ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
-
- return 0;
}
-static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
- pkg->draft);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
+ pkg->major, pkg->minor, pkg->update, pkg->draft);
}
-static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
-
- return 0;
}
-static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
/* The netlist version fields are BCD formatted */
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
-static int
-ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
@@ -195,21 +176,18 @@ ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int
-ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
@@ -239,8 +217,8 @@ enum ice_version_type {
static const struct ice_devlink_version {
enum ice_version_type type;
const char *key;
- int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
- int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
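Each table entry pairs a key with a getter and an optional fallback. A hypothetical "stored" entry (assuming an ICE_VERSION_STORED type alongside the fixed/running ones) would report a staged NVM version and fall back to the running version when no update is pending:

	/* hypothetical entry, for illustration only */
	{ ICE_VERSION_STORED, DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
	  ice_info_pending_nvm_ver, ice_info_nvm_ver },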
@@ -352,24 +330,15 @@ static int ice_devlink_info_get(struct devlink *devlink,
memset(ctx->buf, 0, sizeof(ctx->buf));
- err = ice_devlink_versions[i].getter(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
+ ice_devlink_versions[i].getter(pf, ctx);
/* If the default getter doesn't report a version, use the
* fallback function. This is primarily useful in the case of
* "stored" versions that want to report the same value as the
* running version in the normal case of no pending update.
*/
- if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) {
- err = ice_devlink_versions[i].fallback(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
- }
+ if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
+ ice_devlink_versions[i].fallback(pf, ctx);
/* Do not report missing versions */
if (ctx->buf[0] == '\0')
@@ -457,6 +426,8 @@ ice_devlink_flash_update(struct devlink *devlink,
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .eswitch_mode_get = ice_eswitch_mode_get,
+ .eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
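With these two ops wired in, the eswitch mode becomes readable and settable through the standard devlink CLI, e.g. "devlink dev eswitch show pci/0000:03:00.0" and "devlink dev eswitch set pci/0000:03:00.0 mode switchdev" (the PCI address here is illustrative).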
@@ -483,10 +454,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
return NULL;
/* Add an action to teardown the devlink when unwinding the driver */
- if (devm_add_action(dev, ice_devlink_free, devlink)) {
- devlink_free(devlink);
+ if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
return NULL;
- }
return devlink_priv(devlink);
}
@@ -499,15 +468,58 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_register(struct ice_pf *pf)
+void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- struct device *dev = ice_pf_to_dev(pf);
+
+ devlink_register(devlink);
+}
+
+/**
+ * ice_devlink_unregister - Unregister devlink resources for this PF.
+ * @pf: the PF structure to cleanup
+ *
+ * Releases resources used by devlink and cleans up associated memory.
+ */
+void ice_devlink_unregister(struct ice_pf *pf)
+{
+ devlink_unregister(priv_to_devlink(pf));
+}
+
+/**
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
+ *
+ * Create and register a devlink_port for this PF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
int err;
- err = devlink_register(devlink);
+ dev = ice_pf_to_dev(pf);
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.bus.func;
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
if (err) {
- dev_err(dev, "devlink registration failed: %d\n", err);
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
return err;
}
@@ -515,71 +527,75 @@ int ice_devlink_register(struct ice_pf *pf)
}
/**
- * ice_devlink_unregister - Unregister devlink resources for this PF.
- * @pf: the PF structure to cleanup
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
*
- * Releases resources used by devlink and cleans up associated memory.
+ * Unregisters the devlink_port structure associated with this PF.
*/
-void ice_devlink_unregister(struct ice_pf *pf)
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
+ struct devlink_port *devlink_port;
+
+ devlink_port = &pf->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
}
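A minimal pairing sketch for the new PF-port helpers (not part of this patch; the probe/remove placement and the "vsi" variable are assumptions):

	/* at probe, once the main VSI exists: */
	err = ice_devlink_create_pf_port(pf);
	if (err)
		return err;
	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);

	/* at remove, in reverse order: */
	ice_devlink_destroy_pf_port(pf);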
/**
- * ice_devlink_create_port - Create a devlink port for this VSI
- * @vsi: the VSI to create a port for
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
*
- * Create and register a devlink_port for this VSI.
+ * Create and register a devlink_port for this VF.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_vsi *vsi)
+int ice_devlink_create_vf_port(struct ice_vf *vf)
{
struct devlink_port_attrs attrs = {};
- struct ice_port_info *pi;
+ struct devlink_port *devlink_port;
struct devlink *devlink;
+ struct ice_vsi *vsi;
struct device *dev;
struct ice_pf *pf;
int err;
- /* Currently we only create devlink_port instances for PF VSIs */
- if (vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_get_vf_vsi(vf);
+ devlink_port = &vf->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.bus.func;
+ attrs.pci_vf.vf = vf->vf_id;
- pf = vsi->back;
+ devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
- dev = ice_pf_to_dev(pf);
- pi = pf->hw.port_info;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pi->lport;
- devlink_port_attrs_set(&vsi->devlink_port, &attrs);
- err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
if (err) {
- dev_err(dev, "devlink_port_register failed: %d\n", err);
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
return err;
}
- vsi->devlink_port_registered = true;
-
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
- * @vsi: the VSI to cleanup
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
*
- * Unregisters the devlink_port structure associated with this VSI.
+ * Unregisters the devlink_port structure associated with this VF.
*/
-void ice_devlink_destroy_port(struct ice_vsi *vsi)
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
- if (!vsi->devlink_port_registered)
- return;
+ struct devlink_port *devlink_port;
- devlink_port_type_clear(&vsi->devlink_port);
- devlink_port_unregister(&vsi->devlink_port);
+ devlink_port = &vf->devlink_port;
- vsi->devlink_port_registered = false;
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index e07e74426bde..b7f9551e4fc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -6,10 +6,12 @@
struct ice_pf *ice_allocate_pf(struct device *dev);
-int ice_devlink_register(struct ice_pf *pf);
+void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_vsi *vsi);
-void ice_devlink_destroy_port(struct ice_vsi *vsi);
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
new file mode 100644
index 000000000000..6cb50653b18d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -0,0 +1,649 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_eswitch.h"
+#include "ice_fltr.h"
+#include "ice_repr.h"
+#include "ice_devlink.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_eswitch_setup_env - configure switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function adds the HW filter configuration specific to switchdev
+ * mode.
+ */
+static int ice_eswitch_setup_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_port_info *pi = pf->hw.port_info;
+ bool rule_added = false;
+
+ ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+
+ ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+
+ if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ goto err_def_rx;
+
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
+ if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ goto err_def_rx;
+ rule_added = true;
+ }
+
+ if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
+ goto err_def_tx;
+
+ if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_uplink;
+
+ if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_control;
+
+ if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
+ ICE_FLTR_TX,
+ ICE_SINGLE_ACT_LB_ENABLE))
+ goto err_update_action;
+
+ return 0;
+
+err_update_action:
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_control:
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_uplink:
+ ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+err_def_tx:
+ if (rule_added)
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+err_def_rx:
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
+ * @pf: pointer to PF struct
+ *
+ * In switchdev mode the numbers of allocated Tx and Rx rings are equal.
+ *
+ * This function fills the q_vector structures associated with each
+ * representor and moves each ring pair to the port representor netdevs.
+ * Each port representor gets one dedicated Tx/Rx ring pair, so the number
+ * of ring pairs equals the number of VFs.
+ */
+static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = pf->switchdev.control_vsi;
+ int q_id;
+
+ ice_for_each_txq(vsi, q_id) {
+ struct ice_repr *repr = pf->vf[q_id].repr;
+ struct ice_q_vector *q_vector = repr->q_vector;
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+
+ q_vector->vsi = vsi;
+ q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
+
+ q_vector->num_ring_tx = 1;
+ q_vector->tx.tx_ring = tx_ring;
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = NULL;
+ tx_ring->netdev = repr->netdev;
+ /* In switchdev mode, from OS stack perspective, there is only
+ * one queue for given netdev, so it needs to be indexed as 0.
+ */
+ tx_ring->q_index = 0;
+
+ q_vector->num_ring_rx = 1;
+ q_vector->rx.rx_ring = rx_ring;
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = NULL;
+ rx_ring->netdev = repr->netdev;
+ }
+}
+
+/**
+ * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
+ * @pf: pointer to PF struct
+ */
+static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int max_vsi_num = 0;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!vf->repr->dst) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ goto err;
+ }
+
+ if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ goto err;
+ }
+
+ if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ goto err;
+ }
+
+ if (max_vsi_num < vsi->vsi_num)
+ max_vsi_num = vsi->vsi_num;
+
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+ netif_keep_dst(vf->repr->netdev);
+ }
+
+ kfree(ctrl_vsi->target_netdevs);
+
+ ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
+ sizeof(*ctrl_vsi->target_netdevs),
+ GFP_KERNEL);
+ if (!ctrl_vsi->target_netdevs)
+ goto err;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_repr *repr = pf->vf[i].repr;
+ struct ice_vsi *vsi = repr->src_vsi;
+ struct metadata_dst *dst;
+
+ ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
+
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = repr->netdev;
+ ice_repr_set_traffic_vsi(repr, ctrl_vsi);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ int i;
+
+ kfree(ctrl_vsi->target_netdevs);
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
+ }
+}
+
+/**
+ * ice_eswitch_update_repr - reconfigure VF port representor
+ * @vsi: VF VSI for which port representor is configured
+ */
+void ice_eswitch_update_repr(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+ int ret;
+
+ if (!ice_is_switchdev_running(pf))
+ return;
+
+ vf = &pf->vf[vsi->vf_id];
+ repr = vf->repr;
+ repr->src_vsi = vsi;
+ repr->dst->u.port_info.port_id = vsi->vsi_num;
+
+ ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (ret) {
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ }
+}
+
+/**
+ * ice_eswitch_port_start_xmit - callback for packet transmit
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ struct ice_vsi *vsi;
+
+ np = netdev_priv(netdev);
+ vsi = np->vsi;
+
+ if (ice_is_reset_in_progress(vsi->back->state))
+ return NETDEV_TX_BUSY;
+
+ repr = ice_netdev_to_repr(netdev);
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)repr->dst);
+ skb_dst_set(skb, (struct dst_entry *)repr->dst);
+ skb->queue_mapping = repr->vf->vf_id;
+
+ return ice_start_xmit(skb, netdev);
+}
+
+/**
+ * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * @skb: pointer to send buffer
+ * @off: pointer to offload struct
+ */
+void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off)
+{
+ struct metadata_dst *dst = skb_metadata_dst(skb);
+ u64 cd_cmd, dst_vsi;
+
+ if (!dst) {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
+ off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
+ } else {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
+ dst_vsi = ((u64)dst->u.port_info.port_id <<
+ ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
+ }
+}
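As a worked sketch (the port_id value 0x2A is hypothetical, not from this patch), the else branch above composes QW1 like so:

	u64 cd_cmd, dst_vsi;

	cd_cmd  = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
	dst_vsi = ((u64)0x2A << ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
	off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
	/* ICE_TXD_CTX_QW1_VSI_S is 50 and the mask is 0x3FF << 50 (see the
	 * ice_lan_tx_rx.h hunk below), so the target VSI number lands in
	 * bits 59:50 of the Tx context descriptor.
	 */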
+
+/**
+ * ice_eswitch_release_env - clear switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function removes the HW filter configuration specific to switchdev
+ * mode and restores the default legacy mode settings.
+ */
+static void ice_eswitch_release_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+}
+
+/**
+ * ice_eswitch_vsi_setup - configure switchdev control VSI
+ * @pf: pointer to PF structure
+ * @pi: pointer to port_info structure
+ */
+static struct ice_vsi *
+ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+}
+
+/**
+ * ice_eswitch_napi_del - remove NAPI handle for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_del(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_enable - enable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_enable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_enable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_disable - disable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_disable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_disable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_set_rxdid - configure rxdid on all Rx queues of the VSI
+ * @vsi: VSI to setup rxdid on
+ * @rxdid: flex descriptor id
+ */
+static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
+ u16 pf_q = vsi->rxq_map[ring->q_index];
+
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
+ }
+}
+
+/**
+ * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
+ * @pf: pointer to PF structure
+ */
+static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi;
+
+ pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+ if (!pf->switchdev.control_vsi)
+ return -ENODEV;
+
+ ctrl_vsi = pf->switchdev.control_vsi;
+ pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
+ if (!pf->switchdev.uplink_vsi)
+ goto err_vsi;
+
+ if (ice_eswitch_setup_env(pf))
+ goto err_vsi;
+
+ if (ice_repr_add_for_all_vfs(pf))
+ goto err_repr_add;
+
+ if (ice_eswitch_setup_reprs(pf))
+ goto err_setup_reprs;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ if (ice_vsi_open(ctrl_vsi))
+ goto err_setup_reprs;
+
+ ice_eswitch_napi_enable(pf);
+
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+
+ return 0;
+
+err_setup_reprs:
+ ice_repr_rem_from_all_vfs(pf);
+err_repr_add:
+ ice_eswitch_release_env(pf);
+err_vsi:
+ ice_vsi_release(ctrl_vsi);
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_disable_switchdev - disable switchdev resources
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_release_env(pf);
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
+ ice_vsi_release(ctrl_vsi);
+ ice_repr_rem_from_all_vfs(pf);
+}
+
+/**
+ * ice_eswitch_mode_set - set new eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: eswitch mode to switch to
+ * @extack: pointer to extack structure
+ */
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (pf->eswitch_mode == mode)
+ return 0;
+
+ if (pf->num_alloc_vfs) {
+ dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
+ NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
+ return -EOPNOTSUPP;
+ }
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
+ return -EINVAL;
+ }
+
+ pf->eswitch_mode = mode;
+ return 0;
+}
+
+/**
+ * ice_eswitch_get_target_netdev - return port representor netdev
+ * @rx_ring: pointer to Rx ring
+ * @rx_desc: pointer to Rx descriptor
+ *
+ * When operating in switchdev mode (i.e. when the control VSI is in use),
+ * this function returns the netdev of the appropriate port representor.
+ * In the non-switchdev case, the regular netdev associated with the Rx
+ * ring is returned.
+ */
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
+ struct ice_vsi *vsi = rx_ring->vsi;
+ struct ice_vsi *control_vsi;
+ u16 target_vsi_id;
+
+ control_vsi = vsi->back->switchdev.control_vsi;
+ if (vsi != control_vsi)
+ return rx_ring->netdev;
+
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ target_vsi_id = le16_to_cpu(desc->src_vsi);
+
+ return vsi->target_netdevs[target_vsi_id];
+}
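A minimal Rx-path caller sketch (illustrative only; the helper name and surrounding flow are assumptions, not from this patch):

	static void ice_rx_deliver_sketch(struct ice_rx_ring *rx_ring,
					  union ice_32b_rx_flex_desc *rx_desc,
					  struct sk_buff *skb)
	{
		struct net_device *netdev;

		/* resolves to a representor netdev on the control VSI,
		 * otherwise to the ring's own netdev
		 */
		netdev = ice_eswitch_get_target_netdev(rx_ring, rx_desc);
		skb->protocol = eth_type_trans(skb, netdev);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}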
+
+/**
+ * ice_eswitch_mode_get - get current eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: output parameter for current eswitch mode
+ */
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ *mode = pf->eswitch_mode;
+ return 0;
+}
+
+/**
+ * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
+ * false otherwise.
+ */
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
+/**
+ * ice_eswitch_release - cleanup eswitch
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_release(struct ice_pf *pf)
+{
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return;
+
+ ice_eswitch_disable_switchdev(pf);
+ pf->switchdev.is_running = false;
+}
+
+/**
+ * ice_eswitch_configure - configure eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_configure(struct ice_pf *pf)
+{
+ int status;
+
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
+ return 0;
+
+ status = ice_eswitch_enable_switchdev(pf);
+ if (status)
+ return status;
+
+ pf->switchdev.is_running = true;
+ return 0;
+}
+
+/**
+ * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_start_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_stop_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_rebuild - rebuild eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int status;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_napi_del(pf);
+
+ status = ice_eswitch_setup_env(pf);
+ if (status)
+ return status;
+
+ status = ice_eswitch_setup_reprs(pf);
+ if (status)
+ return status;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ ice_replay_tc_fltrs(pf);
+
+ status = ice_vsi_open(ctrl_vsi);
+ if (status)
+ return status;
+
+ ice_eswitch_napi_enable(pf);
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+ ice_eswitch_start_all_tx_queues(pf);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
new file mode 100644
index 000000000000..364cd2a79c37
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_ESWITCH_H_
+#define _ICE_ESWITCH_H_
+
+#include <net/devlink.h>
+
+#ifdef CONFIG_ICE_SWITCHDEV
+void ice_eswitch_release(struct ice_pf *pf);
+int ice_eswitch_configure(struct ice_pf *pf);
+int ice_eswitch_rebuild(struct ice_pf *pf);
+
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
+
+void ice_eswitch_update_repr(struct ice_vsi *vsi);
+
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
+
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
+
+void ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off);
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+#else /* CONFIG_ICE_SWITCHDEV */
+static inline void ice_eswitch_release(struct ice_pf *pf) { }
+
+static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+
+static inline void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off) { }
+
+static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+
+static inline int ice_eswitch_configure(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ return DEVLINK_ESWITCH_MODE_LEGACY;
+}
+
+static inline int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
+
+static inline netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ return NETDEV_TX_BUSY;
+}
+#endif /* CONFIG_ICE_SWITCHDEV */
+#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index c451cf401e63..cfe96a127ed4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -170,10 +170,9 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
static void
-ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct ice_orom_info *orom;
@@ -193,6 +192,15 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+}
+
+static void
+ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_drvinfo(netdev, drvinfo, np->vsi);
+
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -584,7 +592,7 @@ static bool ice_lbtest_check_frame(u8 *frame)
*
* Function sends loopback packets on a test Tx ring.
*/
-static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
+static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
@@ -637,7 +645,7 @@ static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
* Function receives loopback packets and verify their correctness.
* Returns number of received valid frames.
*/
-static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
struct ice_rx_buf *rx_buf;
int valid_frames, i;
@@ -676,9 +684,10 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
- struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
u8 *tx_frame;
int i;
@@ -866,10 +875,10 @@ skip_ol_tests:
netdev_info(netdev, "testing finished\n");
}
-static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void
+__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
unsigned int i;
u8 *p = data;
@@ -879,6 +888,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
ethtool_sprintf(&p,
ice_gstrings_vsi_stats[i].stat_string);
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
ice_for_each_alloc_txq(vsi, i) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -917,6 +929,13 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_strings(netdev, stringset, data, np->vsi);
+}
+
static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
@@ -1215,6 +1234,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
enum ice_status status;
bool dcbx_agent_status;
+ if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
+ clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
+ ret = -EOPNOTSUPP;
+ goto ethtool_exit;
+ }
+
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
@@ -1312,13 +1338,13 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
}
static void
-ice_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats, u64 *data)
+__ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
unsigned int j;
int i = 0;
char *p;
@@ -1332,14 +1358,17 @@ ice_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
/* populate per queue stats */
rcu_read_lock();
ice_for_each_alloc_txq(vsi, j) {
- ring = READ_ONCE(vsi->tx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
+ if (tx_ring) {
+ data[i++] = tx_ring->stats.pkts;
+ data[i++] = tx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1347,10 +1376,10 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
ice_for_each_alloc_rxq(vsi, j) {
- ring = READ_ONCE(vsi->rx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ rx_ring = READ_ONCE(vsi->rx_rings[j]);
+ if (rx_ring) {
+ data[i++] = rx_ring->stats.pkts;
+ data[i++] = rx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1379,6 +1408,15 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
+static void
+ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_ethtool_stats(netdev, stats, data, np->vsi);
+}
+
#define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
ICE_PHY_TYPE_LOW_100M_SGMII)
@@ -2667,9 +2705,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
- struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *xdp_rings = NULL;
+ struct ice_tx_ring *xdp_rings = NULL;
+ struct ice_tx_ring *tx_rings = NULL;
+ struct ice_rx_ring *rx_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2718,12 +2757,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_alloc_txq(vsi, i)
vsi->tx_rings[i]->count = new_tx_cnt;
- for (i = 0; i < vsi->alloc_rxq; i++)
+ ice_for_each_alloc_rxq(vsi, i)
vsi->rx_rings[i]->count = new_rx_cnt;
if (ice_is_xdp_ena_vsi(vsi))
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
@@ -2772,7 +2811,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
goto free_tx;
}
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
@@ -2866,7 +2905,7 @@ process_link:
}
if (xdp_rings) {
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
ice_free_tx_ring(vsi->xdp_rings[i]);
*vsi->xdp_rings[i] = xdp_rings[i];
}
@@ -3155,6 +3194,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EIO;
}
+ if (ice_is_adq_active(pf)) {
+ netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
@@ -3255,7 +3299,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring && q_vector->tx.ring)
+ if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
combined++;
}
@@ -3365,6 +3409,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
+ if (ice_is_adq_active(pf)) {
+ netdev_err(dev, "Cannot set channels with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
return -EOPNOTSUPP;
@@ -3466,15 +3515,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-enum ice_container_type {
- ICE_RX_CONTAINER,
- ICE_TX_CONTAINER,
-};
-
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -3484,24 +3527,23 @@ enum ice_container_type {
* Returns 0 on success, negative otherwise.
*/
static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
- struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
{
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
ec->rx_coalesce_usecs = rc->itr_setting;
- ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
+ ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
- dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
return -EINVAL;
}
@@ -3522,18 +3564,18 @@ static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else {
@@ -3585,7 +3627,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -3597,19 +3638,22 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
* Returns 0 on success, negative otherwise.
*/
static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
- const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+ const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
u32 use_adaptive_coalesce, coalesce_usecs;
struct ice_pf *pf = vsi->back;
u16 itr_setting;
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
+ {
+ struct ice_q_vector *q_vector = rc->rx_ring->q_vector;
+
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
@@ -3618,22 +3662,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
ICE_MAX_INTRL);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl &&
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
(ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
c_type_str);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
- rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
- ice_write_intrl(rc->ring->q_vector,
- ec->rx_coalesce_usecs_high);
- }
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl)
+ q_vector->intrl = ec->rx_coalesce_usecs_high;
use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
coalesce_usecs = ec->rx_coalesce_usecs;
break;
+ }
case ICE_TX_CONTAINER:
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3641,7 +3683,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
- c_type);
+ rc->type);
return -EINVAL;
}
@@ -3690,22 +3732,22 @@ static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
@@ -3778,6 +3820,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, v_idx))
return -EINVAL;
+
+ ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
}
goto set_complete;
}
@@ -3785,6 +3829,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;
+ ice_set_q_vector_intrl(vsi->q_vectors[q_num]);
+
set_complete:
return 0;
}
@@ -3804,6 +3850,54 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
return __ice_set_coalesce(netdev, ec, q_num);
}
+static void
+ice_repr_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
+}
+
+static void
+ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ /* for port representors only ETH_SS_STATS is supported */
+ if (ice_check_vf_ready_for_cfg(repr->vf) ||
+ stringset != ETH_SS_STATS)
+ return;
+
+ __ice_get_strings(netdev, stringset, data, repr->src_vsi);
+}
+
+static void
+ice_repr_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
+}
+
+static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ICE_VSI_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
#define ICE_I2C_EEPROM_DEV_ADDR 0xA0
#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
#define ICE_MODULE_TYPE_SFP 0x03
@@ -4055,6 +4149,23 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}
+static const struct ethtool_ops ice_ethtool_repr_ops = {
+ .get_drvinfo = ice_repr_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ice_repr_get_strings,
+ .get_ethtool_stats = ice_repr_get_ethtool_stats,
+ .get_sset_count = ice_repr_get_sset_count,
+};
+
+/**
+ * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
+ * @netdev: network interface device structure
+ */
+void ice_set_ethtool_repr_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ice_ethtool_repr_ops;
+}
+
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 16de603b280c..38960bcc384c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -706,7 +706,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1068,7 +1068,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 59ef68f072c0..cbd8424631e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -952,7 +952,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
if (frag)
- loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF;
break;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index d2d40e18ae8a..da4163856f4c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -48,7 +48,7 @@
* requests that the packet not be fragmented. MF indicates that a packet has
* been fragmented.
*/
-#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20
+#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 1ac96dc66d0d..e731b46270c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -735,7 +735,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-static enum ice_status
+enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -748,7 +748,7 @@ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
*
* This function will release the change lock using the proper Admin Command.
*/
-static void ice_release_change_lock(struct ice_hw *hw)
+void ice_release_change_lock(struct ice_hw *hw)
{
ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
@@ -1330,6 +1330,86 @@ fw_ddp_compat_free_alloc:
}
/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as being of type
+ * ice_sw_fv_section and enumerates its offset field. "offset" is an
+ * index into the field vector table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section = section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= le16_to_cpu(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = le16_to_cpu(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for used profile
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function determines the maximum profile index among the
+ * profiles in use and stores it in struct ice_switch_info *switch_info
+ * in the HW structure for later use.
+ */
+static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* In a profile that is not in use, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return 0;
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
@@ -1408,6 +1488,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
@@ -1485,6 +1566,167 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
}
/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ unsigned long *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ if (req_profs == ICE_PROF_ALL) {
+ bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ u32 offset;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ if (req_profs & ICE_PROF_NON_TUN)
+ set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @prot_ids: field vector to search for with a given protocol ID
+ * @ids_cnt: lookup/protocol count
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ids_cnt || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!test_bit((u16)offset, bm))
+ continue;
+
+ for (i = 0; i < ids_cnt; i++) {
+ int j;
+
+ /* This code assumes that if a switch field vector line
+ * has a matching protocol, then this line will contain
+ * the entries necessary to represent every field in
+ * that protocol header.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id == prot_ids[i])
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == ids_cnt) {
+ fvl = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fvl), GFP_KERNEL);
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ list_add(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (list_empty(fv_list))
+ return ICE_ERR_CFG;
+ return 0;
+
+err:
+ list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ list_del(&fvl->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
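A caller-side sketch of honoring the NOTE above (illustrative; "prot_ids", "ids_cnt" and "bm" are assumed to be prepared by the caller):

	struct ice_sw_fv_list_entry *fvl, *tmp;
	LIST_HEAD(fv_list);

	if (!ice_get_sw_fv_list(hw, prot_ids, ids_cnt, bm, &fv_list)) {
		list_for_each_entry_safe(fvl, tmp, &fv_list, list_entry) {
			/* consume fvl->profile_id / fvl->fv_ptr here, then
			 * free the entry as the NOTE requires
			 */
			list_del(&fvl->list_entry);
			devm_kfree(ice_hw_to_dev(hw), fvl);
		}
	}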
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ bitmap_zero(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ set_bit(i, hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
* ice_pkg_buf_free
* @hw: pointer to the HW structure
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
@@ -1863,6 +2105,35 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
return 0;
}
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off)
+{
+ struct ice_fv_word *fv_ext;
+
+ if (prof >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (fv_idx >= hw->blk[blk].es.fvw)
+ return ICE_ERR_PARAM;
+
+ fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+ *prot = fv_ext[fv_idx].prot_id;
+ *off = fv_ext[fv_idx].off;
+
+ return 0;
+}
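A usage sketch with hypothetical inputs (profile 5, field vector word 2 of the switch block):

	u8 prot;
	u16 off;

	if (!ice_find_prot_off(hw, ICE_BLK_SW, 5, 2, &prot, &off))
		ice_debug(hw, ICE_DBG_PKG, "prof 5 word 2: prot %u off %u\n",
			  prot, off);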
+
/* PTG Management */
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 8a58e79729b9..344c2637facd 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,20 @@
#define ICE_PKG_CNT 4
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_change_lock(struct ice_hw *hw);
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off);
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
+ unsigned long *bm);
+void
+ice_init_prof_result_bm(struct ice_hw *hw);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 7d8b517a63c9..120bcebaa080 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -13,6 +13,8 @@ struct ice_fv_word {
u8 resvrd;
} __packed;
+#define ICE_MAX_NUM_PROFILES 256
+
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
@@ -279,6 +281,12 @@ struct ice_sw_fv_section {
struct ice_fv fv[];
};
+struct ice_sw_fv_list_entry {
+ struct list_head list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
@@ -603,4 +611,9 @@ struct ice_chs_chg {
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
+
+enum ice_prof_type {
+ ICE_PROF_NON_TUN = 0x1,
+ ICE_PROF_ALL = 0xFF,
+};
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 2418d4fff037..c2e78eaf4ccb 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -395,3 +395,83 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
+
+/**
+ * ice_fltr_update_rule_flags - update lan_en/lb_en flags
+ * @hw: pointer to hw
+ * @rule_id: id of rule being updated
+ * @recipe_id: recipe id of rule
+ * @act: current action field
+ * @type: Rx or Tx
+ * @src: source VSI
+ * @new_flags: combinations of lb_en and lan_en
+ */
+static enum ice_status
+ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
+ u32 act, u16 type, u16 src, u32 new_flags)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status err;
+ u32 flags_mask;
+
+ s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
+ act &= ~flags_mask;
+ act |= (flags_mask & new_flags);
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ if (type & ICE_FLTR_RX) {
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+
+ } else {
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ }
+
+ err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_update_sw_rules, NULL);
+
+ kfree(s_rule);
+ return err;
+}
+
+/**
+ * ice_fltr_build_action - build action for rule
+ * @vsi_id: ID of the VSI used to build the action
+ */
+static u32 ice_fltr_build_action(u16 vsi_id)
+{
+ return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
+ ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+}
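
ice_fltr_build_action() packs the VSI ID into a shifted, masked field of the 32-bit action word and sets the forwarding and valid flags. A standalone sketch of that packing; the shift, mask, and flag values here are illustrative placeholders, not the real ICE_SINGLE_ACT_* definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* illustrative placeholders for the ICE_SINGLE_ACT_* macros */
#define ACT_VSI_ID_S   4
#define ACT_VSI_ID_M   (0x3FFu << ACT_VSI_ID_S)
#define ACT_VSI_FWD    0x0u          /* action type: forward to VSI */
#define ACT_VALID_BIT  (1u << 17)

static uint32_t build_action(uint16_t vsi_id)
{
	/* shift the VSI ID into its field, mask off overflow, set flags */
	return (((uint32_t)vsi_id << ACT_VSI_ID_S) & ACT_VSI_ID_M) |
	       ACT_VSI_FWD | ACT_VALID_BIT;
}

int main(void)
{
	printf("action for VSI 5: 0x%08x\n", build_action(5)); /* 0x00020050 */
	return 0;
}
```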
+
+/**
+ * ice_fltr_update_flags_dflt_rule - update flags on default rule
+ * @vsi: pointer to VSI
+ * @rule_id: id of rule
+ * @direction: Tx or Rx
+ * @new_flags: flags to update
+ *
+ * Function updates flags on default rule with ICE_SW_LKUP_DFLT.
+ *
+ * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
+ * ICE_SINGLE_ACT_LAN_ENABLE.
+ */
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags)
+{
+ u32 action = ice_fltr_build_action(vsi->vsi_num);
+ struct ice_hw *hw = &vsi->back->hw;
+
+ return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
+ direction, vsi->vsi_num, new_flags);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 361cb4da9b43..8eec4febead1 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -36,4 +36,7 @@ enum ice_status
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 76021d977b60..a49082485642 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -182,6 +182,7 @@
#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
+#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 80736e0ec0dc..d981dc6f2323 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -301,6 +301,46 @@ struct ice_32b_rx_flex_desc_nic {
} flex_ts;
};
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Source VSI
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct ice_32b_rx_flex_desc_nic_2 {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flow_id;
+ __le16 src_vsi;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
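
All multi-byte fields of the new descriptor are little-endian as written by hardware, so a consumer converts each field before use. A standalone model of reading quadword 3, using the userspace le16toh()/htole16() analogs of le16_to_cpu()/cpu_to_le16():

```c
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* model of quadword 3 of ice_32b_rx_flex_desc_nic_2 */
struct rx_desc_qw3 {
	uint16_t flow_id; /* stored little-endian */
	uint16_t src_vsi; /* stored little-endian */
};

int main(void)
{
	/* pretend the NIC wrote these fields */
	struct rx_desc_qw3 qw3 = {
		.flow_id = htole16(0x1234),
		.src_vsi = htole16(7),
	};

	/* src_vsi is what lets a switchdev path attribute a packet to a VF */
	printf("flow 0x%04x from source VSI %u\n",
	       le16toh(qw3.flow_id), le16toh(qw3.src_vsi));
	return 0;
}
```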
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -529,6 +569,9 @@ struct ice_tx_ctx_desc {
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_QW1_VSI_S 50
+#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
+
enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_TSO = 0x01,
ICE_TX_CTX_DESC_TSYN = 0x02,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index b718e196af2a..77dceab9fbbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -22,8 +22,12 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_VF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
+ case ICE_VSI_CHNL:
+ return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
+ case ICE_VSI_SWITCHDEV_CTRL:
+ return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -44,12 +48,12 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
int ret = 0;
u16 i;
- for (i = 0; i < vsi->num_rxq; i++)
+ ice_for_each_rxq(vsi, i)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
ice_flush(&vsi->back->hw);
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
@@ -71,6 +75,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
struct device *dev;
dev = ice_pf_to_dev(pf);
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
/* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
@@ -132,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -200,6 +207,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ /* The number of queues for the ctrl VSI is equal to the number
+ * of VFs. Each ring is associated with the corresponding VF_PR netdev.
+ */
+ vsi->alloc_txq = pf->num_alloc_vfs;
+ vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->num_q_vectors = 1;
+ break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
@@ -218,6 +233,10 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1;
break;
+ case ICE_VSI_CHNL:
+ vsi->alloc_txq = 0;
+ vsi->alloc_rxq = 0;
+ break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
@@ -263,7 +282,7 @@ static int ice_get_free_slot(void *array, int size, int curr)
* ice_vsi_delete - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-static void ice_vsi_delete(struct ice_vsi *vsi)
+void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
@@ -334,7 +353,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
*
* Returns 0 on success, negative on failure
*/
-static int ice_vsi_clear(struct ice_vsi *vsi)
+int ice_vsi_clear(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -379,12 +398,12 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring)
+ if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
- ice_clean_ctrl_tx_irq(q_vector->tx.ring);
+ ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
}
@@ -398,7 +417,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring && !q_vector->rx.ring)
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
q_vector->total_events++;
@@ -408,16 +427,33 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+ struct ice_pf *pf = q_vector->vsi->back;
+ int i;
+
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
+ return IRQ_HANDLED;
+
+ ice_for_each_vf(pf, i)
+ napi_schedule(&pf->vf[i].repr->q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @vsi_type: type of VSI
+ * @ch: ptr to channel
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
+ struct ice_channel *ch, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -444,10 +480,17 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
- else
+ else if (vsi_type != ICE_VSI_CHNL)
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
+ case ICE_VSI_SWITCHDEV_CTRL:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+
+ /* Setup eswitch MSIX irq handler for VSI */
+ vsi->irq_handler = ice_eswitch_msix_clean_rings;
+ break;
case ICE_VSI_PF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -466,6 +509,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
+ case ICE_VSI_CHNL:
+ if (!ch)
+ goto err_rings;
+ vsi->num_rxq = ch->num_rxq;
+ vsi->num_txq = ch->num_txq;
+ vsi->next_base_q = ch->base_q;
+ break;
case ICE_VSI_LB:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -582,6 +632,9 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
};
int ret;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+
ret = __ice_vsi_get_qs(&tx_qs_cfg);
if (ret)
return ret;
@@ -606,12 +659,12 @@ static void ice_vsi_put_qs(struct ice_vsi *vsi)
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
clear_bit(vsi->txq_map[i], pf->avail_txqs);
vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
}
@@ -700,12 +753,23 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
cap = &pf->hw.func_caps.common_cap;
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size;
+ if (vsi->type == ICE_VSI_CHNL)
+ vsi->rss_size = min_t(u16, vsi->num_rxq,
+ BIT(cap->rss_table_entry_width));
+ else
+ vsi->rss_size = min_t(u16, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
@@ -775,21 +839,13 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
- bool ena_tc0 = false;
u8 netdev_tc = 0;
int i;
- /* at least TC0 should be enabled by default */
- if (vsi->tc_cfg.numtc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(0)))
- ena_tc0 = true;
- } else {
- ena_tc0 = true;
- }
-
- if (ena_tc0) {
- vsi->tc_cfg.numtc++;
- vsi->tc_cfg.ena_tc |= 1;
+ if (!vsi->tc_cfg.numtc) {
+ /* at least TC0 should be enabled by default */
+ vsi->tc_cfg.numtc = 1;
+ vsi->tc_cfg.ena_tc = 1;
}
num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
@@ -931,6 +987,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
@@ -953,6 +1010,28 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
+static void
+ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 qcount, qmap;
+ u8 offset = 0;
+ int pow;
+
+ qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+
+ pow = order_base_2(qcount);
+ qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+ ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
+ ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
+}
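
The queue map encodes the queue count as a power-of-two exponent (order_base_2()) packed beside the queue offset, which is why qcount is effectively rounded up to the next power of two. A standalone sketch of the encoding; the shift/mask values stand in for the ICE_AQ_VSI_TC_Q_* macros and are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

/* illustrative placeholders for the ICE_AQ_VSI_TC_Q_* macros */
#define TC_Q_OFFSET_S 0
#define TC_Q_OFFSET_M (0x7FFu << TC_Q_OFFSET_S)
#define TC_Q_NUM_S    11
#define TC_Q_NUM_M    (0xFu << TC_Q_NUM_S)

/* order_base_2(n): smallest e such that 2^e >= n, for n >= 1 */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int e = 0;

	while ((1u << e) < n)
		e++;
	return e;
}

static uint16_t encode_qmap(uint16_t offset, uint16_t qcount)
{
	unsigned int pow = order_base_2(qcount);

	return ((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
	       ((pow << TC_Q_NUM_S) & TC_Q_NUM_M);
}

int main(void)
{
	/* 6 queues round up to 2^3 = 8 queue slots at offset 0 */
	printf("qmap = 0x%04x\n", encode_qmap(0, 6)); /* 0x1800 */
	return 0;
}
```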
+
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
@@ -980,6 +1059,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_CHNL:
+ ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
+ break;
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
@@ -990,6 +1073,21 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
goto out;
}
+ /* Handle VLAN pruning for channel VSI if main VSI has VLAN
+ * pruning enabled
+ */
+ if (vsi->type == ICE_VSI_CHNL) {
+ struct ice_vsi *main_vsi;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
+ ctxt->info.sw_flags2 |=
+ ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &=
+ ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
ice_set_dflt_vsi_ctx(ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
@@ -1010,13 +1108,17 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
}
ctxt->info.sw_id = vsi->port_info->sw_id;
- ice_vsi_setup_q_map(vsi, ctxt);
- if (!init_vsi) /* means VSI being updated */
- /* must to indicate which section of VSI context are
- * being modified
- */
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ if (vsi->type == ICE_VSI_CHNL) {
+ ice_chnl_vsi_setup_q_map(vsi, ctxt);
+ } else {
+ ice_vsi_setup_q_map(vsi, ctxt);
+ if (!init_vsi) /* means VSI being updated */
+ /* must indicate which sections of the VSI context are
+ * being modified
+ */
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ }
/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
* respectively
@@ -1195,6 +1297,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
/* SRIOV doesn't grab irq_tracker entries for each VSI */
if (vsi->type == ICE_VSI_VF)
return 0;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
if (vsi->base_vector) {
dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
@@ -1249,14 +1353,14 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (q_vector) {
- q_vector->tx.ring = NULL;
- q_vector->rx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
+ q_vector->rx.rx_ring = NULL;
}
}
}
if (vsi->tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL);
@@ -1264,7 +1368,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
}
}
if (vsi->rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
WRITE_ONCE(vsi->rx_rings[i], NULL);
@@ -1285,8 +1389,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
- for (i = 0; i < vsi->alloc_txq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_tx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1296,7 +1400,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->txq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
@@ -1305,8 +1408,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
}
/* Allocate Rx rings */
- for (i = 0; i < vsi->alloc_rxq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_rx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1315,7 +1418,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->rxq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = dev;
@@ -1363,7 +1465,7 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
-static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
@@ -1371,7 +1473,25 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
int err;
dev = ice_pf_to_dev(pf);
- vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+ if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
+ (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
+ } else {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+
+ /* If orig_rss_size is valid and less than the currently
+ * determined main VSI rss_size, restore rss_size to
+ * orig_rss_size so that when the tc-qdisc is deleted, the
+ * main VSI RSS table is programmed back to what it was
+ * prior to setup-tc for the ADQ config.
+ */
+ if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
+ vsi->orig_rss_size <= vsi->num_rxq) {
+ vsi->rss_size = vsi->orig_rss_size;
+ /* now orig_rss_size is used, reset it to zero */
+ vsi->orig_rss_size = 0;
+ }
+ }
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -1710,7 +1830,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
int err;
@@ -1766,7 +1886,7 @@ setup_rings:
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1817,8 +1937,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_xdp_txq(vsi, i)
+ vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -1853,6 +1973,23 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
}
+static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
+{
+ switch (rc->type) {
+ case ICE_RX_CONTAINER:
+ if (rc->rx_ring)
+ return rc->rx_ring->q_vector;
+ break;
+ case ICE_TX_CONTAINER:
+ if (rc->tx_ring)
+ return rc->tx_ring->q_vector;
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
/**
* __ice_write_itr - write throttle rate to register
* @q_vector: pointer to interrupt data structure
@@ -1877,15 +2014,39 @@ void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{
struct ice_q_vector *q_vector;
- if (!rc->ring)
+ q_vector = ice_pull_qvec_from_rc(rc);
+ if (!q_vector)
return;
- q_vector = rc->ring->q_vector;
-
__ice_write_itr(q_vector, rc, itr);
}
/**
+ * ice_set_q_vector_intrl - set up interrupt rate limiting
+ * @q_vector: the vector to be configured
+ *
+ * Interrupt rate limiting is local to the vector, not per-queue, so we must
+ * detect if either ring container has dynamic moderation enabled to decide
+ * what interrupt rate limit to set via the INTRL settings. If dynamic
+ * moderation is disabled on both, write the cached value to make sure the
+ * INTRL register matches the user-visible value.
+ */
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
+{
+ if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
+ /* when dynamic moderation is enabled, cap each vector at
+ * 4 us, i.e. no more than 250,000 ints/sec. This keeps
+ * latency low while staying under 500,000 interrupts per
+ * second, which reduces CPU load at the lowest-latency
+ * setting.
+ */
+ ice_write_intrl(q_vector, 4);
+ } else {
+ ice_write_intrl(q_vector, q_vector->intrl);
+ }
+}
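
The 4 us figure follows directly from the register semantics: an interrupt rate limit of N microseconds bounds the vector to USEC_PER_SEC / N interrupts per second. A one-line standalone check of the arithmetic:

```c
#include <stdio.h>

#define USEC_PER_SEC 1000000u

int main(void)
{
	unsigned int intrl = 4; /* microseconds between interrupts */

	/* 1,000,000 / 4 = 250,000 ints/sec, under the 500,000 ceiling */
	printf("max ints/sec = %u\n", USEC_PER_SEC / intrl);
	return 0;
}
```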
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*
@@ -1899,7 +2060,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
u16 txq = 0, rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
u16 reg_idx = q_vector->reg_idx;
@@ -2057,7 +2218,7 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
+ u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
{
u16 q_idx;
@@ -2179,10 +2340,14 @@ err_out:
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
- struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ return;
+ }
- vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
- vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ /* set VSI TC information based on DCB config */
+ ice_vsi_set_dcb_tc_cfg(vsi);
}
/**
@@ -2295,8 +2460,10 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2393,6 +2560,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
+ * @ch: ptr to channel
*
* This allocates the sw VSI structure and its queue resources.
*
@@ -2401,7 +2569,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id)
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2409,10 +2577,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
+ if (vsi_type == ICE_VSI_CHNL)
+ vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+ else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
else
- vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2429,10 +2599,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_alloc_fd_res(vsi);
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
+ if (vsi_type != ICE_VSI_CHNL) {
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc;
+ }
}
/* set RSS capabilities */
@@ -2448,6 +2620,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2490,6 +2663,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
ice_init_arfs(vsi);
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
* map queues to vectors through Virtchnl, PF driver only
@@ -2528,9 +2707,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->alloc_txq;
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
+
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
+ }
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
@@ -2591,7 +2782,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
u32 rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
ice_write_intrl(q_vector, 0);
@@ -2757,7 +2948,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
ice_vsi_close(vsi);
}
}
@@ -2860,7 +3052,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- ice_devlink_destroy_port(vsi);
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -3041,7 +3234,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
vsi->q_vectors[i]->intrl = coalesce[i].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
/* the number of queue vectors increased so write whatever is in
@@ -3059,7 +3252,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
}
@@ -3144,6 +3337,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
switch (vtype) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -3163,7 +3357,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto err_vectors;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
@@ -3191,20 +3387,42 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vectors;
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
default:
break;
}
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- max_txqs[i] = vsi->alloc_txq;
+ /* configure VSI nodes based on the number of queues and TCs.
+ * ADQ creates a VSI for each TC/channel but doesn't allocate
+ * queues; instead it reconfigures the PF queues as per the TC
+ * command, so max_txqs should point to the PF Tx queues.
+ */
+ if (vtype == ICE_VSI_CHNL)
+ max_txqs[i] = pf->num_lan_tx;
+ else
+ max_txqs[i] = vsi->alloc_txq;
if (ice_is_xdp_ena_vsi(vsi))
max_txqs[i] += vsi->num_xdp_txq;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ /* If MQPRIO is set we are on the channel code path, so use
+ * a single TC (1) for the main VSI
+ */
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
+
if (status) {
dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
vsi->vsi_num, ice_stat_str(status));
@@ -3276,7 +3494,6 @@ int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
return 0;
}
-#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
* @vsi: VSI being configured
@@ -3292,6 +3509,146 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
}
/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ int numtc = vsi->tc_cfg.numtc;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ /* CHNL VSI doesn't have its own netdev, hence no netdev_tc */
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
+ numtc = vsi->all_numtc;
+
+ if (netdev_set_num_tc(netdev, numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ /* setup TC queue map for CHNL TCs */
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ break;
+ if (!vsi->mqprio_qopt.qopt.count[i])
+ break;
+ netdev_set_tc_queue(netdev, i,
+ vsi->mqprio_qopt.qopt.count[i],
+ vsi->mqprio_qopt.qopt.offset[i]);
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return;
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
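
Each enabled TC is exposed to the stack as a (count, offset) window into the device's queues, and user priorities are then mapped onto TCs. A standalone model of the resulting layout, with illustrative numbers:

```c
#include <stdio.h>

struct tc_info {
	unsigned int qoffset;
	unsigned int qcount;
};

int main(void)
{
	/* two TCs carving up 8 queues: TC0 -> q0..5, TC1 -> q6..7 */
	struct tc_info tcs[] = { { 0, 6 }, { 6, 2 } };
	unsigned int prio;

	/* model of netdev_set_prio_tc_map(): prios 0-3 on TC0, 4-7 on TC1 */
	for (prio = 0; prio < 8; prio++) {
		unsigned int tc = prio / 4;

		printf("prio %u -> TC %u (queues %u..%u)\n", prio, tc,
		       tcs[tc].qoffset,
		       tcs[tc].qoffset + tcs[tc].qcount - 1);
	}
	return 0;
}
```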
+
+/**
+ * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ * @ena_tc: number of traffic classes to enable
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ */
+static void
+ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ u8 ena_tc)
+{
+ u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
+ u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
+ int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u8 netdev_tc = 0;
+ int i;
+
+ vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
+
+ pow = order_base_2(tc0_qcount);
+ qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+ /* TC is not enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = 0;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
+ ctxt->info.tc_mapping[i] = 0;
+ continue;
+ }
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ vsi->tc_cfg.tc_info[i].qoffset = offset;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
+ }
+
+ if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = offset + qcount_tx;
+ vsi->num_rxq = offset + qcount_rx;
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
+
+ /* Find the queue count and starting queue offset available
+ * for the channel VSIs
+ */
+ if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
+ vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
+ vsi->next_base_q = tc0_qcount;
+ }
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
+ vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+}
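
Because the mqprio windows are contiguous, the VSI's total queue count is simply the last window's offset plus its count, and everything beyond TC0's window becomes available to channel VSIs. A standalone check of that arithmetic with illustrative numbers:

```c
#include <stdio.h>

int main(void)
{
	/* illustrative mqprio layout: TC0 gets 4 queues, TC1 gets 8 */
	unsigned int offset[] = { 0, 4 };
	unsigned int count[]  = { 4, 8 };
	unsigned int num_tc = 2;
	unsigned int num_q = offset[num_tc - 1] + count[num_tc - 1];

	printf("num_txq/num_rxq = %u\n", num_q);          /* 12 */
	printf("cnt_q_avail = %u, next_base_q = %u\n",
	       num_q - count[0], count[0]);               /* 8, 4 */
	return 0;
}
```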
+
+/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
* @ena_tc: TC bitmap
@@ -3309,6 +3666,9 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
u8 num_tc = 0;
dev = ice_pf_to_dev(pf);
+ if (vsi->tc_cfg.ena_tc == ena_tc &&
+ vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
+ return ret;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
@@ -3316,6 +3676,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
num_tc++;
/* populate max_txqs per TC */
max_txqs[i] = vsi->alloc_txq;
+ /* Update max_txqs for a CHNL VSI: alloc_t[r]xq are zero for a
+ * CHNL VSI, so use num_txq as max_txqs instead
+ */
+ if (vsi->type == ICE_VSI_CHNL &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ max_txqs[i] = vsi->num_txq;
}
vsi->tc_cfg.ena_tc = ena_tc;
@@ -3328,7 +3694,11 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ctx->vf_num = 0;
ctx->info = vsi->info;
- ice_vsi_setup_q_map(vsi, ctx);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ else
+ ice_vsi_setup_q_map(vsi, ctx);
/* must to indicate which section of VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
@@ -3339,8 +3709,13 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
goto out;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
+ max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
if (status) {
dev_err(dev, "VSI %d failed TC config, error %s\n",
@@ -3356,20 +3731,19 @@ out:
kfree(ctx);
return ret;
}
-#endif /* CONFIG_DCB */
/**
* ice_update_ring_stats - Update ring statistics
- * @ring: ring to update
+ * @stats: stats to be updated
* @pkts: number of processed packets
* @bytes: number of processed bytes
*
* This function assumes that caller has acquired a u64_stats_sync lock.
*/
-static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
+static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{
- ring->stats.bytes += bytes;
- ring->stats.pkts += pkts;
+ stats->bytes += bytes;
+ stats->pkts += pkts;
}
/**
@@ -3378,10 +3752,10 @@ static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(tx_ring, pkts, bytes);
+ ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
u64_stats_update_end(&tx_ring->syncp);
}
@@ -3391,10 +3765,10 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(rx_ring, pkts, bytes);
+ ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
u64_stats_update_end(&rx_ring->syncp);
}
@@ -3547,6 +3921,180 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
}
/**
+ * ice_get_link_speed_mbps - get link speed in Mbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_mbps(struct ice_vsi *vsi)
+{
+ switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ return SPEED_100000;
+ case ICE_AQ_LINK_SPEED_50GB:
+ return SPEED_50000;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case ICE_AQ_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case ICE_AQ_LINK_SPEED_5GB:
+ return SPEED_5000;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return SPEED_2500;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return SPEED_1000;
+ case ICE_AQ_LINK_SPEED_100MB:
+ return SPEED_100;
+ case ICE_AQ_LINK_SPEED_10MB:
+ return SPEED_10;
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ice_get_link_speed_kbps - get link speed in Kbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_kbps(struct ice_vsi *vsi)
+{
+ int speed_mbps;
+
+ speed_mbps = ice_get_link_speed_mbps(vsi);
+
+ return speed_mbps * 1000;
+}
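
The Kbps helper exists so the BW-limit setters below can compare a requested rate directly against the link speed. A standalone sketch of that validation:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* reject a requested rate that exceeds the link speed (Mbps -> Kbps) */
static bool rate_ok(uint64_t req_kbps, int link_mbps)
{
	return req_kbps <= (uint64_t)link_mbps * 1000;
}

int main(void)
{
	printf("%d\n", rate_ok(30000000ULL, 25000)); /* 30G on 25G link: 0 */
	printf("%d\n", rate_ok(10000000ULL, 25000)); /* 10G on 25G link: 1 */
	return 0;
}
```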
+
+/**
+ * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
+ * @vsi: VSI to be configured
+ * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
+ *
+ * If min_tx_rate is 0, the minimum BW limit profile is cleared; otherwise a
+ * non-zero value forces a minimum BW limit for the VSI on TC 0.
+ */
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (min_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure min BW for VSI limit */
+ if (min_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MIN_BW, min_tx_rate);
+ if (status) {
+ dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type));
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MIN_BW);
+ if (status) {
+ dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
+ *
+ * If max_tx_rate is 0, the maximum BW limit profile is cleared; otherwise a
+ * non-zero value forces a maximum BW limit for the VSI on TC 0.
+ */
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (max_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure max BW for VSI limit */
+ if (max_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MAX_BW, max_tx_rate);
+ if (status) {
+ dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MAX_BW);
+ if (status) {
+ dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
+
+/**
* ice_set_link - turn on/off physical link
* @vsi: VSI to modify physical link on
* @ena: turn on/off physical link
@@ -3582,3 +4130,126 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
return 0;
}
+
+/**
+ * ice_is_feature_supported
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to be checked
+ *
+ * returns true if feature is supported, false otherwise
+ */
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return false;
+
+ return test_bit(f, pf->features);
+}
+
+/**
+ * ice_set_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to set
+ */
+static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ set_bit(f, pf->features);
+}
+
+/**
+ * ice_clear_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to clear
+ */
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ clear_bit(f, pf->features);
+}
+
+/**
+ * ice_init_feature_support
+ * @pf: pointer to the struct ice_pf instance
+ *
+ * called during init to set up the supported features
+ */
+void ice_init_feature_support(struct ice_pf *pf)
+{
+ switch (pf->hw.device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ ice_set_feature_support(pf, ICE_F_DSCP);
+ if (ice_is_e810t(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ break;
+ default:
+ break;
+ }
+}
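
The feature helpers implement a common per-device capability pattern: a bitmap indexed by a feature enum, with range checks guarding every access. A standalone model of the same pattern:

```c
#include <stdbool.h>
#include <stdio.h>

enum feature { F_DSCP, F_SMA_CTRL, F_MAX };

struct dev_priv {
	unsigned long features; /* one bit per enum feature */
};

static void set_feature(struct dev_priv *p, enum feature f)
{
	if ((int)f >= 0 && f < F_MAX)
		p->features |= 1UL << f;
}

static bool has_feature(const struct dev_priv *p, enum feature f)
{
	return (int)f >= 0 && f < F_MAX && (p->features & (1UL << f));
}

int main(void)
{
	struct dev_priv p = { 0 };

	set_feature(&p, F_DSCP);
	printf("DSCP: %d, SMA_CTRL: %d\n",
	       has_feature(&p, F_DSCP), has_feature(&p, F_SMA_CTRL));
	return 0;
}
```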
+
+/**
+ * ice_vsi_update_security - update security block in VSI
+ * @vsi: pointer to VSI structure
+ * @fill: function pointer to fill ctx
+ */
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
+{
+ struct ice_vsi_ctx ctx = { 0 };
+
+ ctx.info = vsi->info;
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ fill(&ctx);
+
+ if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+ return -ENODEV;
+
+ vsi->info = ctx.info;
+ return 0;
+}
+
+/**
+ * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
+ ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_set_allow_override - allow destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
+
+/**
+ * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index d5a28bf0fc2c..e7f4ecbb8549 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -14,7 +14,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -51,13 +51,18 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
-#ifdef CONFIG_DCB
+void ice_vsi_delete(struct ice_vsi *vsi);
+int ice_vsi_clear(struct ice_vsi *vsi);
+
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-#endif /* CONFIG_DCB */
+
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
+
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id);
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
@@ -93,9 +98,9 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
-void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
-void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
@@ -103,6 +108,7 @@ int ice_status_to_errno(enum ice_status err);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
@@ -116,4 +122,22 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_clear_dflt_vsi(struct ice_sw *sw);
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
+int ice_get_link_speed_kbps(struct ice_vsi *vsi);
+int ice_get_link_speed_mbps(struct ice_vsi *vsi);
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *));
+
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
+void ice_init_feature_support(struct ice_pf *pf);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 06fa93e597fb..9ba22778011d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -19,6 +19,8 @@
*/
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
+#include "ice_eswitch.h"
+#include "ice_tc_lib.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -42,16 +44,20 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
+DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+EXPORT_SYMBOL(ice_xdp_locking_key);
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
-static int ice_vsi_open(struct ice_vsi *vsi);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
+static int ice_rebuild_channels(struct ice_pf *pf);
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
+
bool netif_is_ice(struct net_device *dev)
{
return dev && (dev->netdev_ops == &ice_netdev_ops);
@@ -61,7 +67,7 @@ bool netif_is_ice(struct net_device *dev)
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
-static u16 ice_get_tx_pending(struct ice_ring *ring)
+static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
u16 head, tail;
@@ -100,10 +106,15 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
hw = &vsi->back->hw;
- for (i = 0; i < vsi->num_txq; i++) {
- struct ice_ring *tx_ring = vsi->tx_rings[i];
+ ice_for_each_txq(vsi, i) {
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+
+ if (!tx_ring)
+ continue;
+ if (ice_ring_ch_enabled(tx_ring))
+ continue;
- if (tx_ring && tx_ring->desc) {
+ if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
* queue.
@@ -455,17 +466,21 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_prepare_for_reset - prep for the core to reset
+ * ice_prepare_for_reset - prep for reset
* @pf: board private structure
+ * @reset_type: reset type requested
*
* Inform or close all dependent features in prep for reset.
*/
static void
-ice_prepare_for_reset(struct ice_pf *pf)
+ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
unsigned int i;
+ dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
+
/* already prepared for reset */
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
@@ -480,6 +495,38 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
+ /* release ADQ specific HW and SW resources */
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ goto skip;
+
+ /* to be on the safe side, reset orig_rss_size so that the normal
+ * flow of deciding rss_size can take precedence
+ */
+ vsi->orig_rss_size = 0;
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ if (reset_type == ICE_RESET_PFR) {
+ vsi->old_ena_tc = vsi->all_enatc;
+ vsi->old_numtc = vsi->all_numtc;
+ } else {
+ ice_remove_q_channels(vsi, true);
+
+ /* for other reset types, channel rebuild is not supported,
+ * so reset the related info
+ */
+ vsi->old_ena_tc = 0;
+ vsi->all_enatc = 0;
+ vsi->old_numtc = 0;
+ vsi->all_numtc = 0;
+ vsi->req_txq = 0;
+ vsi->req_rxq = 0;
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
+ }
+ }
+skip:
+
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
@@ -499,8 +546,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
/**
* ice_do_reset - Initiate one of many types of resets
* @pf: board private structure
- * @reset_type: reset type requested
- * before this function was called.
+ * @reset_type: reset type requested before this function was called.
*/
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -509,7 +555,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
@@ -567,7 +613,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* return if no valid reset type requested */
if (reset_type == ICE_RESET_INVAL)
return;
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -624,7 +670,10 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
break;
case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
- netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
+ netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
+ else
+ netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
break;
default:
break;
@@ -1965,7 +2014,8 @@ static int ice_configure_phy(struct ice_vsi *vsi)
ice_print_topo_conflict(vsi);
- if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
+ phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
return -EPERM;
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
@@ -2302,14 +2352,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_num = pf->msix_entries[base + vector].vector;
- if (q_vector->tx.ring && q_vector->rx.ring) {
+ if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring) {
+ } else if (q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring) {
+ } else if (q_vector->tx.tx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2367,11 +2417,12 @@ free_q_irqs:
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
- int i;
+ struct ice_tx_desc *tx_desc;
+ int i, j;
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
@@ -2380,16 +2431,29 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
- xdp_ring->ring_active = false;
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ spin_lock_init(&xdp_ring->tx_lock);
+ for (j = 0; j < xdp_ring->count; j++) {
+ tx_desc = ICE_TX_DESC(xdp_ring, j);
+ tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+ }
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key))
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ else
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
}
return 0;
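
When the locking key is enabled there are fewer XDP rings than Rx queues, so rings are shared round-robin (which is why each ring now carries a tx_lock). A standalone sketch of the assignment:

```c
#include <stdio.h>

int main(void)
{
	unsigned int num_rxq = 8, num_xdp_txq = 3, i;

	/* shared case: modulo assignment, several Rx queues per XDP ring */
	for (i = 0; i < num_rxq; i++)
		printf("rx queue %u -> xdp ring %u\n", i, i % num_xdp_txq);
	return 0;
}
```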
@@ -2455,6 +2519,10 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (__ice_vsi_get_qs(&xdp_qs_cfg))
goto err_map_xdp;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ netdev_warn(vsi->netdev,
+ "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
@@ -2468,11 +2536,11 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
q_base = vsi->num_xdp_txq - xdp_rings_rem;
for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = xdp_ring;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
}
xdp_rings_rem -= xdp_rings_per_v;
}
@@ -2501,7 +2569,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
return 0;
clear_xdp_rings:
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
@@ -2509,7 +2577,7 @@ clear_xdp_rings:
err_map_xdp:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
@@ -2542,25 +2610,25 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
- ice_for_each_ring(ring, q_vector->tx)
+ ice_for_each_tx_ring(ring, q_vector->tx)
if (!ring->tx_buf || !ice_ring_is_xdp(ring))
break;
/* restore the value of last node prior to XDP setup */
- q_vector->tx.ring = ring;
+ q_vector->tx.tx_ring = ring;
}
free_qmap:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
mutex_unlock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
if (vsi->xdp_rings[i]->desc)
ice_free_tx_ring(vsi->xdp_rings[i]);
@@ -2571,6 +2639,9 @@ free_qmap:
devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
vsi->xdp_rings = NULL;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ static_branch_dec(&ice_xdp_locking_key);
+
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
return 0;
@@ -2598,7 +2669,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
int i;
ice_for_each_rxq(vsi, i) {
- struct ice_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
@@ -2606,6 +2677,29 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_determine_xdp_res - figure out how many Tx queues XDP can have
+ * @vsi: VSI to determine the count of XDP Tx qs for
+ *
+ * Returns 0 if the available Tx queue count is at least half of the CPU
+ * count, -ENOMEM otherwise
+ */
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
+{
+ u16 avail = ice_get_avail_txq_count(vsi->back);
+ u16 cpus = num_possible_cpus();
+
+ if (avail < cpus / 2)
+ return -ENOMEM;
+
+ vsi->num_xdp_txq = min_t(u16, avail, cpus);
+
+ if (vsi->num_xdp_txq < cpus)
+ static_branch_inc(&ice_xdp_locking_key);
+
+ return 0;
+}
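A worked example of the policy above, with purely illustrative numbers:

/* Illustrative: 64 possible CPUs, 40 available Tx queues.
 * 40 >= 64 / 2, so the check passes; num_xdp_txq = min_t(u16, 40, 64) = 40.
 * Since 40 < 64, ice_xdp_locking_key is enabled and Rx rings share XDP Tx
 * rings via the modulo mapping in ice_xdp_alloc_setup_rings().
 */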
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2634,10 +2728,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
+ if (xdp_ring_err) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ } else {
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ }
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
@@ -3103,6 +3201,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
/* enable features */
netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
tso_features;
@@ -3139,7 +3240,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
}
@@ -3182,7 +3283,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+}
+
+static struct ice_vsi *
+ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ struct ice_channel *ch)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
}
/**
@@ -3196,7 +3304,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
}
/**
@@ -3210,7 +3318,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
}
/**
@@ -3303,6 +3411,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
if (!vsi)
return -ENOMEM;
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
status = ice_cfg_netdev(vsi);
if (status) {
status = -ENODEV;
@@ -3538,6 +3649,13 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
}
+ /* reserve for switchdev */
+ needed = ICE_ESWITCH_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+
/* total used for non-traffic vectors */
v_other = v_budget;
@@ -4170,11 +4288,11 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_port(vsi);
+ err = ice_devlink_create_pf_port(pf);
if (err)
goto err_devlink_create;
- devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
err_devlink_create:
@@ -4261,12 +4379,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
- err = ice_devlink_register(pf);
- if (err) {
- dev_err(dev, "ice_devlink_register failed: %d\n", err);
- goto err_exit_unroll;
- }
-
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -4279,6 +4391,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
+ ice_init_feature_support(pf);
+
ice_request_fw(pf);
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -4500,6 +4614,7 @@ probe_done:
dev_warn(dev, "RDMA is not supported on this device\n");
}
+ ice_devlink_register(pf);
return 0;
err_init_aux_unroll:
@@ -4523,7 +4638,6 @@ err_init_pf_unroll:
ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
- ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return err;
@@ -4600,9 +4714,7 @@ static void ice_remove(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
int i;
- if (!pf)
- return;
-
+ ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
@@ -4640,7 +4752,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
- ice_devlink_unregister(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
@@ -4902,7 +5013,7 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
@@ -4994,7 +5105,7 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
}
@@ -5150,10 +5261,16 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
+ if (ice_chnl_dmac_fltr_cnt(pf)) {
+ netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
+ mac);
+ return -EAGAIN;
+ }
+
netif_addr_lock_bh(netdev);
ether_addr_copy(old_mac, netdev->dev_addr);
/* change the netdev's MAC address */
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
@@ -5181,7 +5298,7 @@ err_update_filters:
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, old_mac);
+ eth_hw_addr_set(netdev, old_mac);
netif_addr_unlock_bh(netdev);
return err;
}
@@ -5401,6 +5518,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
ice_clear_arfs(vsi);
}
+ /* don't turn off hw_tc_offload when ADQ is already enabled */
+ if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
+ dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
+ return -EACCES;
+ }
+
+ if ((features & NETIF_F_HW_TC) &&
+ !(netdev->features & NETIF_F_HW_TC))
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ else
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+
return ret;
}
@@ -5450,77 +5579,59 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
}
/* THEORY OF MODERATION:
- * The below code creates custom DIM profiles for use by this driver, because
- * the ice driver hardware works differently than the hardware that DIMLIB was
+ * The ice driver hardware works differently than the hardware that DIMLIB was
* originally made for. ice hardware doesn't have packet count limits that
* can trigger an interrupt, but it *does* have interrupt rate limit support,
- * and this code adds that capability to be used by the driver when it's using
- * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
- * for how to "respond" to traffic and interrupts, so this driver uses a
- * slightly different set of moderation parameters to get best performance.
+ * which is hard-coded to a limit of 250,000 ints/second.
+ * If not using dynamic moderation, the INTRL value can be modified
+ * by ethtool rx-usecs-high.
*/
struct ice_dim {
/* the throttle rate for interrupts, basically worst case delay before
* an initial interrupt fires, value is stored in microseconds.
*/
u16 itr;
- /* the rate limit for interrupts, which can cap a delay from a small
- * ITR at a certain amount of interrupts per second. f.e. a 2us ITR
- * could yield as much as 500,000 interrupts per second, but with a
- * 10us rate limit, it limits to 100,000 interrupts per second. Value
- * is stored in microseconds.
- */
- u16 intrl;
};
/* Make a different profile for Rx that doesn't allow quite so aggressive
- * moderation at the high end (it maxes out at 128us or about 8k interrupts a
- * second. The INTRL/rate parameters here are only useful to cap small ITR
- * values, which is why for larger ITR's - like 128, which can only generate
- * 8k interrupts per second, there is no point to rate limit and the values
- * are set to zero. The rate limit values do affect latency, and so must
- * be reasonably small so to not impact latency sensitive tests.
+ * moderation at the high end (it maxes out at 126us or about 8k interrupts a
+ * second).
*/
static const struct ice_dim rx_profile[] = {
- {2, 10},
- {8, 16},
- {32, 0},
- {96, 0},
- {128, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+ {16}, /* 62,500 ints/s */
+ {62}, /* 16,129 ints/s */
+ {126} /* 7,936 ints/s */
};
/* The transmit profile, which has the same sorts of values
* as the previous struct
*/
static const struct ice_dim tx_profile[] = {
- {2, 10},
- {8, 16},
- {64, 0},
- {128, 0},
- {256, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+ {40}, /* 16,125 ints/s */
+ {128}, /* 7,812 ints/s */
+ {256} /* 3,906 ints/s */
};
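The per-entry rates quoted in these tables follow directly from the ITR period; a small sketch of the arithmetic (hypothetical helper, not part of the driver):

static inline u32 itr_us_to_int_rate(u16 itr_us)
{
	/* one interrupt per ITR window, clamped by the 250,000 ints/s
	 * INTRL ceiling described in the comment above
	 */
	u32 rate = itr_us ? 1000000U / itr_us : 0;

	return min(rate, 250000U);
}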
static void ice_tx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, tx);
+ rc = (struct ice_ring_container *)dim->priv;
- if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
- dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
/* look up the values in our local table */
itr = tx_profile[dim->profile_ix].itr;
- intrl = tx_profile[dim->profile_ix].intrl;
- ice_trace(tx_dim_work, q_vector, dim);
+ ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
@@ -5528,28 +5639,65 @@ static void ice_tx_dim_work(struct work_struct *work)
static void ice_rx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, rx);
+ rc = (struct ice_ring_container *)dim->priv;
- if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
- dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
/* look up the values in our local table */
itr = rx_profile[dim->profile_ix].itr;
- intrl = rx_profile[dim->profile_ix].intrl;
- ice_trace(rx_dim_work, q_vector, dim);
+ ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
+#define ICE_DIM_DEFAULT_PROFILE_IX 1
+
+/**
+ * ice_init_moderation - set up interrupt moderation
+ * @q_vector: the vector containing rings to be configured
+ *
+ * Set up interrupt moderation registers, with the intent to do the right
+ * thing when called from reset or from probe, whether or not dynamic
+ * moderation is enabled. Take special care to write all the registers in
+ * both modes in order to make sure hardware is in a known state.
+ */
+static void ice_init_moderation(struct ice_q_vector *q_vector)
+{
+ struct ice_ring_container *rc;
+ bool tx_dynamic, rx_dynamic;
+
+ rc = &q_vector->tx;
+ INIT_WORK(&rc->dim.work, ice_tx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ tx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial TX ITR to match the above */
+ ice_write_itr(rc, tx_dynamic ?
+ tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
+
+ rc = &q_vector->rx;
+ INIT_WORK(&rc->dim.work, ice_rx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ rx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial RX ITR to match the above */
+ ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
+ rc->itr_setting);
+
+ ice_set_q_vector_intrl(q_vector);
+}
+
/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
@@ -5564,13 +5712,9 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
- q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ ice_init_moderation(q_vector);
- INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
- q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_enable(&q_vector->napi);
}
}
@@ -5630,7 +5774,8 @@ int ice_up(struct ice_vsi *vsi)
/**
* ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
- * @ring: Tx or Rx ring to read stats from
+ * @syncp: pointer to u64_stats_sync
+ * @stats: stats from which the pkts and bytes counts will be taken
* @pkts: packets stats counter
* @bytes: bytes stats counter
*
@@ -5638,19 +5783,16 @@ int ice_up(struct ice_vsi *vsi)
* that needs to be performed to read u64 values in 32 bit machine.
*/
static void
-ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
+ u64 *pkts, u64 *bytes)
{
unsigned int start;
- *pkts = 0;
- *bytes = 0;
- if (!ring)
- return;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- *pkts = ring->stats.pkts;
- *bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(syncp);
+ *pkts = stats.pkts;
+ *bytes = stats.bytes;
+ } while (u64_stats_fetch_retry_irq(syncp, start));
}
/**
@@ -5660,18 +5802,19 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
* @count: number of rings
*/
static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
u16 count)
{
struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
u16 i;
for (i = 0; i < count; i++) {
- struct ice_ring *ring;
- u64 pkts, bytes;
+ struct ice_tx_ring *ring;
+ u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ if (ring)
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -5710,9 +5853,9 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
@@ -5976,7 +6119,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_disable(&q_vector->napi);
cancel_work_sync(&q_vector->tx.dim.work);
@@ -5995,9 +6138,11 @@ int ice_down(struct ice_vsi *vsi)
/* Caller of this function is expected to set the
* vsi->state ICE_DOWN bit
*/
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
+ } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -6059,12 +6204,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- struct ice_ring *ring = vsi->tx_rings[i];
+ struct ice_tx_ring *ring = vsi->tx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_tx_ring(ring);
if (err)
break;
@@ -6090,12 +6236,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = vsi->rx_rings[i];
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_rx_ring(ring);
if (err)
break;
@@ -6168,7 +6315,7 @@ err_setup_tx:
*
* Returns 0 on success, negative value on error
*/
-static int ice_vsi_open(struct ice_vsi *vsi)
+int ice_vsi_open(struct ice_vsi *vsi)
{
char int_name[ICE_INT_NAME_STR_LEN];
struct ice_pf *pf = vsi->back;
@@ -6193,14 +6340,16 @@ static int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
- if (err)
- goto err_set_qs;
+ if (vsi->type == ICE_VSI_PF) {
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
+ if (err)
+ goto err_set_qs;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
- if (err)
- goto err_set_qs;
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
+ if (err)
+ goto err_set_qs;
+ }
err = ice_up_complete(vsi);
if (err)
@@ -6235,6 +6384,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi[i])
continue;
+ if (pf->vsi[i]->type == ICE_VSI_CHNL)
+ continue;
+
err = ice_vsi_release(pf->vsi[i]);
if (err)
dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
@@ -6439,6 +6591,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
+ if (err) {
+ dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
+ goto err_vsi_rebuild;
+ }
+
+ if (reset_type == ICE_RESET_PFR) {
+ err = ice_rebuild_channels(pf);
+ if (err) {
+ dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
+ err);
+ goto err_vsi_rebuild;
+ }
+ }
+
/* If Flow Director is active */
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
@@ -6985,7 +7152,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *tx_ring = NULL;
+ struct ice_tx_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u32 i;
@@ -7003,7 +7170,7 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++)
+ ice_for_each_txq(vsi, i)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
if (txqueue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
@@ -7060,6 +7227,935 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_setup_tc_cls_flower - flower classifier offloads
+ * @np: net device to configure
+ * @filter_dev: device on which filter is added
+ * @cls_flower: offload data
+ */
+static int
+ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
+ struct net_device *filter_dev,
+ struct flow_cls_offload *cls_flower)
+{
+ struct ice_vsi *vsi = np->vsi;
+
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(vsi, cls_flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb - callback handler registered for TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ */
+static int
+ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_validate_mqprio_qopt - Validate TCF input parameters
+ * @vsi: Pointer to VSI
+ * @mqprio_qopt: input parameters for mqprio queue configuration
+ *
+ * This function validates MQPRIO params, such as qcount (power of 2 wherever
+ * needed), and makes sure the user doesn't specify qcount and BW rate limits
+ * for more TCs than "num_tc"
+ */
+static int
+ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 sum_max_rate = 0, sum_min_rate = 0;
+ int non_power_of_2_qcount = 0;
+ struct ice_pf *pf = vsi->back;
+ int max_rss_q_cnt = 0;
+ struct device *dev;
+ int i, speed;
+ u8 num_tc;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ if (mqprio_qopt->qopt.offset[0] != 0 ||
+ mqprio_qopt->qopt.num_tc < 1 ||
+ mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ vsi->ch_rss_size = 0;
+ num_tc = mqprio_qopt->qopt.num_tc;
+
+ for (i = 0; num_tc; i++) {
+ int qcount = mqprio_qopt->qopt.count[i];
+ u64 max_rate, min_rate, rem;
+
+ if (!qcount)
+ return -EINVAL;
+
+ if (is_power_of_2(qcount)) {
+ if (non_power_of_2_qcount &&
+ qcount > non_power_of_2_qcount) {
+ dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount > max_rss_q_cnt)
+ max_rss_q_cnt = qcount;
+ } else {
+ if (non_power_of_2_qcount &&
+ qcount != non_power_of_2_qcount) {
+ dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount < max_rss_q_cnt) {
+ dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
+ qcount, max_rss_q_cnt);
+ return -EINVAL;
+ }
+ max_rss_q_cnt = qcount;
+ non_power_of_2_qcount = qcount;
+ }
+
+ /* TC command takes input in K/M/Gbps or K/M/Gbit etc but
+ * converts the bandwidth rate limit into Bytes/s when
+ * passing it down to the driver. So convert input bandwidth
+ * from Bytes/s to Kbps
+ */
+ max_rate = mqprio_qopt->max_rate[i];
+ max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
+ sum_max_rate += max_rate;
+
+ /* min_rate is the minimum guaranteed rate; when nonzero it must be
+ * at least ICE_MIN_BW_LIMIT
+ */
+ min_rate = mqprio_qopt->min_rate[i];
+ min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
+ sum_min_rate += min_rate;
+
+ if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
+ dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
+ min_rate, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ /* min_rate can't be more than max_rate, except when max_rate
+ * is zero (implies max_rate sought is max line rate). In such
+ * a case min_rate can be more than max.
+ */
+ if (max_rate && min_rate > max_rate) {
+ dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
+ min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ if (i >= mqprio_qopt->qopt.num_tc - 1)
+ break;
+ if (mqprio_qopt->qopt.offset[i + 1] !=
+ (mqprio_qopt->qopt.offset[i] + qcount))
+ return -EINVAL;
+ }
+ if (vsi->num_rxq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+ if (vsi->num_txq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (sum_max_rate && sum_max_rate > (u64)speed) {
+ dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
+ sum_max_rate, speed);
+ return -EINVAL;
+ }
+ if (sum_min_rate && sum_min_rate > (u64)speed) {
+ dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
+ sum_min_rate, speed);
+ return -EINVAL;
+ }
+
+ /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
+ vsi->ch_rss_size = max_rss_q_cnt;
+
+ return 0;
+}
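The rate conversion in the loop above is plain arithmetic: tc passes rates as Bytes/s, and dividing by ICE_BW_KBPS_DIVISOR (125 bytes per kilobit, assuming the divisor's conventional value) yields Kbps. For example:

/* Illustrative: a user-specified 1 Gbit/s arrives as 125,000,000 Bytes/s;
 * div_u64(125000000, ICE_BW_KBPS_DIVISOR) == 1,000,000 Kbps.
 */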
+
+/**
+ * ice_add_channel - add a channel by adding VSI
+ * @pf: ptr to PF device
+ * @sw_id: underlying HW switching element ID
+ * @ch: ptr to channel structure
+ *
+ * Add a channel (VSI) using add_vsi and queue_map
+ */
+static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+
+ if (ch->type != ICE_VSI_CHNL) {
+ dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
+ return -EINVAL;
+ }
+
+ vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
+ if (!vsi || vsi->type != ICE_VSI_CHNL) {
+ dev_err(dev, "create chnl VSI failure\n");
+ return -EINVAL;
+ }
+
+ ch->sw_id = sw_id;
+ ch->vsi_num = vsi->vsi_num;
+ ch->info.mapping_flags = vsi->info.mapping_flags;
+ ch->ch_vsi = vsi;
+ /* set the back pointer of channel for newly created VSI */
+ vsi->ch = ch;
+
+ memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
+ sizeof(vsi->info.q_mapping));
+ memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+
+ return 0;
+}
+
+/**
+ * ice_chnl_cfg_res
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Configure channel specific resources such as rings, vector.
+ */
+static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ int i;
+
+ for (i = 0; i < ch->num_txq; i++) {
+ struct ice_q_vector *tx_q_vector, *rx_q_vector;
+ struct ice_ring_container *rc;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (!tx_ring || !rx_ring)
+ continue;
+
+ /* setup ring being channel enabled */
+ tx_ring->ch = ch;
+ rx_ring->ch = ch;
+
+ /* following code block sets up vector specific attributes */
+ tx_q_vector = tx_ring->q_vector;
+ rx_q_vector = rx_ring->q_vector;
+ if (!tx_q_vector && !rx_q_vector)
+ continue;
+
+ if (tx_q_vector) {
+ tx_q_vector->ch = ch;
+ /* setup Tx and Rx ITR setting if DIM is off */
+ rc = &tx_q_vector->tx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ if (rx_q_vector) {
+ rx_q_vector->ch = ch;
+ /* setup Tx and Rx ITR setting if DIM is off */
+ rc = &rx_q_vector->rx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ }
+
+ /* it is safe to assume that, if the channel has a non-zero num_txq or
+ * num_rxq, then the GLINT_ITR register has been written to perform an
+ * in-context update, hence perform a flush
+ */
+ if (ch->num_txq || ch->num_rxq)
+ ice_flush(&vsi->back->hw);
+}
+
+/**
+ * ice_cfg_chnl_all_res - configure channel resources
+ * @vsi: ptr to main VSI
+ * @ch: ptr to channel structure
+ *
+ * This function configures channel specific resources such as flow-director
+ * counter index, and other resources such as queues, vectors, ITR settings
+ */
+static void
+ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ /* configure channel (aka ADQ) resources such as queues, vectors,
+ * ITR settings for channel specific vectors and anything else
+ */
+ ice_chnl_cfg_res(vsi, ch);
+}
+
+/**
+ * ice_setup_hw_channel - setup new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ * @sw_id: underlying HW switching element ID
+ * @type: type of channel to be created (VMDq2/VF)
+ *
+ * Set up a new channel (VSI) based on the specified type (VMDq2/VF)
+ * and configure Tx rings accordingly
+ */
+static int
+ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch, u16 sw_id, u8 type)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret;
+
+ ch->base_q = vsi->next_base_q;
+ ch->type = type;
+
+ ret = ice_add_channel(pf, sw_id, ch);
+ if (ret) {
+ dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
+ return ret;
+ }
+
+ /* configure/setup ADQ specific resources */
+ ice_cfg_chnl_all_res(vsi, ch);
+
+ /* make sure to update next_base_q so that subsequent channels'
+ * (aka ADQ) VSI queue maps are correct
+ */
+ vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
+ dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
+ ch->num_rxq);
+
+ return 0;
+}
+
+/**
+ * ice_setup_channel - setup new channel using uplink element
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Set up a new channel (VSI) based on the specified type (VMDq2/VF)
+ * and the uplink switching element
+ */
+static bool
+ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ u16 sw_id;
+ int ret;
+
+ if (vsi->type != ICE_VSI_PF) {
+ dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
+ return false;
+ }
+
+ sw_id = pf->first_sw->sw_id;
+
+ /* create channel (VSI) */
+ ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
+ if (ret) {
+ dev_err(dev, "failed to setup hw_channel\n");
+ return false;
+ }
+ dev_dbg(dev, "successfully created channel()\n");
+
+ return ch->ch_vsi ? true : false;
+}
+
+/**
+ * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
+ * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
+ */
+static int
+ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
+{
+ int err;
+
+ err = ice_set_min_bw_limit(vsi, min_tx_rate);
+ if (err)
+ return err;
+
+ return ice_set_max_bw_limit(vsi, max_tx_rate);
+}
+
+/**
+ * ice_create_q_channel - function to create channel
+ * @vsi: VSI to be configured
+ * @ch: ptr to channel (it contains channel specific params)
+ *
+ * This function creates a channel (VSI) using the num_queues specified by the
+ * user and reconfigures RSS if needed.
+ */
+static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ if (!ch)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ if (!ch->num_txq || !ch->num_rxq) {
+ dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
+ return -EINVAL;
+ }
+
+ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
+ dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
+ vsi->cnt_q_avail, ch->num_txq);
+ return -EINVAL;
+ }
+
+ if (!ice_setup_channel(pf, vsi, ch)) {
+ dev_info(dev, "Failed to setup channel\n");
+ return -EINVAL;
+ }
+ /* configure BW rate limit */
+ if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
+ int ret;
+
+ ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (ret)
+ dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ }
+
+ vsi->cnt_q_avail -= ch->num_txq;
+
+ return 0;
+}
+
+/**
+ * ice_rem_all_chnl_fltrs - removes all channel filters
+ * @pf: ptr to PF, TC-flower based filters are tracked at PF level
+ *
+ * Remove all advanced switch filters only if they are channel specific
+ * tc-flower based filters
+ */
+static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ /* to remove all channel filters, iterate an ordered list of filters */
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ struct ice_rule_query_data rule;
+ int status;
+
+ /* for now process only channel specific filters */
+ if (!ice_is_chnl_fltr(fltr))
+ continue;
+
+ rule.rid = fltr->rid;
+ rule.rule_id = fltr->rule_id;
+ rule.vsi_handle = fltr->dest_id;
+ status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
+ if (status) {
+ if (status == -ENOENT)
+ dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
+ rule.rule_id);
+ else
+ dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
+ status);
+ } else if (fltr->dest_vsi) {
+ /* update advanced switch filter count */
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ u32 flags = fltr->flags;
+
+ fltr->dest_vsi->num_chnl_fltr--;
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+
+ hlist_del(&fltr->tc_flower_node);
+ kfree(fltr);
+ }
+}
+
+/**
+ * ice_remove_q_channels - Remove queue channels for the TCs
+ * @vsi: VSI to be configured
+ * @rem_fltr: delete advanced switch filter or not
+ *
+ * Remove queue channels for the TCs
+ */
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
+{
+ struct ice_channel *ch, *ch_tmp;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ /* remove all tc-flower based filters if they are channel filters only */
+ if (rem_fltr)
+ ice_rem_all_chnl_fltrs(pf);
+
+ /* perform cleanup for channels if they exist */
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ list_del(&ch->list);
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi) {
+ kfree(ch);
+ continue;
+ }
+
+ /* Reset queue contexts */
+ for (i = 0; i < ch->num_rxq; i++) {
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (tx_ring) {
+ tx_ring->ch = NULL;
+ if (tx_ring->q_vector)
+ tx_ring->q_vector->ch = NULL;
+ }
+ if (rx_ring) {
+ rx_ring->ch = NULL;
+ if (rx_ring->q_vector)
+ rx_ring->q_vector->ch = NULL;
+ }
+ }
+
+ /* clear the VSI from scheduler tree */
+ ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
+
+ /* Delete VSI from FW */
+ ice_vsi_delete(ch->ch_vsi);
+
+ /* Delete VSI from PF and HW VSI arrays */
+ ice_vsi_clear(ch->ch_vsi);
+
+ /* free the channel */
+ kfree(ch);
+ }
+
+ /* clear the channel VSI map which is stored in main VSI */
+ ice_for_each_chnl_tc(i)
+ vsi->tc_map_vsi[i] = NULL;
+
+ /* reset main VSI's all TC information */
+ vsi->all_enatc = 0;
+ vsi->all_numtc = 0;
+}
+
+/**
+ * ice_rebuild_channels - rebuild channel
+ * @pf: ptr to PF
+ *
+ * Recreate channel VSIs and replay filters
+ */
+static int ice_rebuild_channels(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *main_vsi;
+ bool rem_adv_fltr = true;
+ struct ice_channel *ch;
+ struct ice_vsi *vsi;
+ int tc_idx = 1;
+ int i, err;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return 0;
+
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
+ main_vsi->old_numtc == 1)
+ return 0; /* nothing to be done */
+
+ /* reconfigure main VSI based on old value of TC and cached values
+ * for MQPRIO opts
+ */
+ err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
+ if (err) {
+ dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
+ main_vsi->old_ena_tc, main_vsi->vsi_num);
+ return err;
+ }
+
+ /* rebuild ADQ VSIs */
+ ice_for_each_vsi(pf, i) {
+ enum ice_vsi_type type;
+
+ vsi = pf->vsi[i];
+ if (!vsi || vsi->type != ICE_VSI_CHNL)
+ continue;
+
+ type = vsi->type;
+
+ /* rebuild ADQ VSI */
+ err = ice_vsi_rebuild(vsi, true);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
+ ice_vsi_type_str(type), vsi->idx, err);
+ goto cleanup;
+ }
+
+ /* Re-map HW VSI number, using VSI handle that has been
+ * previously validated in the ice_replay_vsi() call below
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+
+ /* replay filters for the VSI */
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
+ ice_vsi_type_str(type), err, vsi->idx);
+ rem_adv_fltr = false;
+ goto cleanup;
+ }
+ dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
+ ice_vsi_type_str(type), vsi->idx);
+
+ /* store ADQ VSI at correct TC index in main VSI's
+ * map of TC to VSI
+ */
+ main_vsi->tc_map_vsi[tc_idx++] = vsi;
+ }
+
+ /* ADQ VSI(s) have been rebuilt successfully, so set up channels
+ * for the main VSI's Tx and Rx rings
+ */
+ list_for_each_entry(ch, &main_vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi)
+ continue;
+
+ /* reconfig channel resources */
+ ice_cfg_chnl_all_res(main_vsi, ch);
+
+ /* replay BW rate limit if it is non-zero */
+ if (!ch->max_tx_rate && !ch->min_tx_rate)
+ continue;
+
+ err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (err)
+ dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ err, ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ }
+
+ /* reconfig RSS for main VSI */
+ if (main_vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(main_vsi);
+
+ return 0;
+
+cleanup:
+ ice_remove_q_channels(main_vsi, rem_adv_fltr);
+ return err;
+}
+
+/**
+ * ice_create_q_channels - Add queue channel for the given TCs
+ * @vsi: VSI to be configured
+ *
+ * Configures queue channel mapping to the given TCs
+ */
+static int ice_create_q_channels(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_channel *ch;
+ int ret = 0, i;
+
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
+ ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
+ ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
+ ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
+ ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
+
+ /* convert to Kbits/s */
+ if (ch->max_tx_rate)
+ ch->max_tx_rate = div_u64(ch->max_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+ if (ch->min_tx_rate)
+ ch->min_tx_rate = div_u64(ch->min_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_create_q_channel(vsi, ch);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed creating channel TC:%d\n", i);
+ kfree(ch);
+ goto err_free;
+ }
+ list_add_tail(&ch->list, &vsi->ch_list);
+ vsi->tc_map_vsi[i] = ch->ch_vsi;
+ dev_dbg(ice_pf_to_dev(pf),
+ "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ }
+ return 0;
+
+err_free:
+ ice_remove_q_channels(vsi, false);
+
+ return ret;
+}
+
+/**
+ * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @type_data: TC offload data
+ */
+static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 mode, ena_tc_qdisc = 0;
+ int cur_txq, cur_rxq;
+ u8 hw = 0, num_tcf;
+ struct device *dev;
+ int ret, i;
+
+ dev = ice_pf_to_dev(pf);
+ num_tcf = mqprio_qopt->qopt.num_tc;
+ hw = mqprio_qopt->qopt.hw;
+ mode = mqprio_qopt->mode;
+ if (!hw) {
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ vsi->ch_rss_size = 0;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ goto config_tcf;
+ }
+
+ /* Generate queue region map for number of TCF requested */
+ for (i = 0; i < num_tcf; i++)
+ ena_tc_qdisc |= BIT(i);
+
+ switch (mode) {
+ case TC_MQPRIO_MODE_CHANNEL:
+
+ ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
+ if (ret) {
+ netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
+ ret);
+ return ret;
+ }
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ /* don't assume the state of hw_tc_offload during driver load; set
+ * the flag for TC flower filters if hw_tc_offload is already ON
+ */
+ if (vsi->netdev->features & NETIF_F_HW_TC)
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+config_tcf:
+
+ /* Requesting same TCF configuration as already enabled */
+ if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
+ mode != TC_MQPRIO_MODE_CHANNEL)
+ return 0;
+
+ /* Pause VSI queues */
+ ice_dis_vsi(vsi, true);
+
+ if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ ice_remove_q_channels(vsi, true);
+
+ if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
+ num_online_cpus());
+ vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
+ num_online_cpus());
+ } else {
+ /* logic to rebuild VSI, same like ethtool -L */
+ u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
+
+ for (i = 0; i < num_tcf; i++) {
+ if (!(ena_tc_qdisc & BIT(i)))
+ continue;
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ vsi->req_txq = offset + qcount_tx;
+ vsi->req_rxq = offset + qcount_rx;
+
+ /* store away the original rss_size so that it can be reused by
+ * ice_vsi_rebuild during the tc-qdisc delete stage to determine
+ * the rss_size for the main VSI
+ */
+ vsi->orig_rss_size = vsi->rss_size;
+ }
+
+ /* save current values of Tx and Rx queues before calling VSI rebuild
+ * for fallback option
+ */
+ cur_txq = vsi->num_txq;
+ cur_rxq = vsi->num_rxq;
+
+ /* proceed with rebuilding the main VSI using the correct number of queues */
+ ret = ice_vsi_rebuild(vsi, false);
+ if (ret) {
+ /* fallback to current number of queues */
+ dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
+ vsi->req_txq = cur_txq;
+ vsi->req_rxq = cur_rxq;
+ clear_bit(ICE_RESET_FAILED, pf->state);
+ if (ice_vsi_rebuild(vsi, false)) {
+ dev_err(dev, "Rebuild of main VSI failed again\n");
+ return ret;
+ }
+ }
+
+ vsi->all_numtc = num_tcf;
+ vsi->all_enatc = ena_tc_qdisc;
+ ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
+ if (ret) {
+ netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
+ vsi->vsi_num);
+ goto exit;
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
+
+ /* set TC0 rate limit if specified */
+ if (max_tx_rate || min_tx_rate) {
+ /* convert to Kbits/s */
+ if (max_tx_rate)
+ max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
+ if (min_tx_rate)
+ min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
+ if (!ret) {
+ dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ } else {
+ dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ goto exit;
+ }
+ }
+ ret = ice_create_q_channels(vsi);
+ if (ret) {
+ netdev_err(netdev, "failed configuring queue channels\n");
+ goto exit;
+ } else {
+ netdev_dbg(netdev, "successfully configured channels\n");
+ }
+ }
+
+ if (vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(vsi);
+
+exit:
+ /* if error, reset the all_numtc and all_enatc */
+ if (ret) {
+ vsi->all_numtc = 0;
+ vsi->all_enatc = 0;
+ }
+ /* resume VSI */
+ ice_ena_vsi(vsi, true);
+
+ return ret;
+}
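For orientation, channel-mode mqprio is typically requested from user space along these lines (command shape per the tc-mqprio man page; the device name and rates are illustrative):

/* Example invocation that exercises this path:
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *      shaper bw_rlimit max_rate 4Gbit 5Gbit
 */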
+
+static LIST_HEAD(ice_block_cb_list);
+
+static int
+ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ int err;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &ice_block_cb_list,
+ ice_setup_tc_block_cb,
+ np, np, true);
+ case TC_SETUP_QDISC_MQPRIO:
+ /* setup traffic classifier for receive side */
+ mutex_lock(&pf->tc_mutex);
+ err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return -EOPNOTSUPP;
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -7245,6 +8341,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_open = ice_open,
.ndo_stop = ice_stop,
.ndo_start_xmit = ice_start_xmit,
+ .ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
@@ -7260,8 +8357,10 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
.ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_get_vf_stats = ice_get_vf_stats,
+ .ndo_set_vf_rate = ice_set_vf_bw,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_setup_tc = ice_setup_tc,
.ndo_set_features = ice_set_features,
.ndo_bridge_getlink = ice_bridge_getlink,
.ndo_bridge_setlink = ice_bridge_setlink,
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 199aa5b71540..0b220dfa7457 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -3,6 +3,44 @@
#ifndef _ICE_PROTOCOL_TYPE_H_
#define _ICE_PROTOCOL_TYPE_H_
+#define ICE_IPV6_ADDR_LENGTH 16
+
+/* Each recipe can match up to 5 different fields. Fields to match can be meta-
+ * data, values extracted from packet headers, or results from other recipes.
+ * One of the 5 fields is reserved for matching the switch ID. So, up to 4
+ * recipes can provide intermediate results to another one through chaining,
+ * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ */
+#define ICE_NUM_WORDS_RECIPE 4
+
+/* Max recipes that can be chained */
+#define ICE_MAX_CHAIN_RECIPE 5
+
+/* 1 of the allowed 5 words is reserved for the switch ID, so a recipe can
+ * have at most 4 words, and up to 5 such recipes can be chained together.
+ * So the maximum number of words that can be programmed for a lookup is 5 * 4.
+ */
+#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
+
+/* Field vector index corresponding to chaining */
+#define ICE_CHAIN_FV_INDEX_START 47
+
+enum ice_protocol_type {
+ ICE_MAC_OFOS = 0,
+ ICE_MAC_IL,
+ ICE_ETYPE_OL,
+ ICE_VLAN_OFOS,
+ ICE_IPV4_OFOS,
+ ICE_IPV4_IL,
+ ICE_IPV6_OFOS,
+ ICE_IPV6_IL,
+ ICE_TCP_IL,
+ ICE_UDP_OF,
+ ICE_UDP_ILOS,
+ ICE_SCTP_IL,
+ ICE_PROTOCOL_LAST
+};
+
/* Decoders for ice_prot_id:
* - F: First
* - I: Inner
@@ -35,4 +73,135 @@ enum ice_prot_id {
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
+
+#define ICE_MAC_OFOS_HW 1
+#define ICE_MAC_IL_HW 4
+#define ICE_ETYPE_OL_HW 9
+#define ICE_VLAN_OF_HW 16
+#define ICE_VLAN_OL_HW 17
+#define ICE_IPV4_OFOS_HW 32
+#define ICE_IPV4_IL_HW 33
+#define ICE_IPV6_OFOS_HW 40
+#define ICE_IPV6_IL_HW 41
+#define ICE_TCP_IL_HW 49
+#define ICE_UDP_ILOS_HW 53
+
+#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
+
+#define ICE_TUN_FLAG_FV_IND 2
+
+/* Mapping of software defined protocol ID to hardware defined protocol ID */
+struct ice_protocol_entry {
+ enum ice_protocol_type type;
+ u8 protocol_id;
+};
+
+struct ice_ether_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+};
+
+struct ice_ethtype_hdr {
+ __be16 ethtype_id;
+};
+
+struct ice_ether_vlan_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 vlan_id;
+};
+
+struct ice_vlan_hdr {
+ __be16 type;
+ __be16 vlan;
+};
+
+struct ice_ipv4_hdr {
+ u8 version;
+ u8 tos;
+ __be16 total_length;
+ __be16 id;
+ __be16 frag_off;
+ u8 time_to_live;
+ u8 protocol;
+ __be16 check;
+ __be32 src_addr;
+ __be32 dst_addr;
+};
+
+struct ice_ipv6_hdr {
+ __be32 be_ver_tc_flow;
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 hop_limit;
+ u8 src_addr[ICE_IPV6_ADDR_LENGTH];
+ u8 dst_addr[ICE_IPV6_ADDR_LENGTH];
+};
+
+struct ice_sctp_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 verification_tag;
+ __be32 check;
+};
+
+struct ice_l4_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 len;
+ __be16 check;
+};
+
+union ice_prot_hdr {
+ struct ice_ether_hdr eth_hdr;
+ struct ice_ethtype_hdr ethertype;
+ struct ice_vlan_hdr vlan_hdr;
+ struct ice_ipv4_hdr ipv4_hdr;
+ struct ice_ipv6_hdr ipv6_hdr;
+ struct ice_l4_hdr l4_hdr;
+ struct ice_sctp_hdr sctp_hdr;
+};
+
+/* This is a mapping table entry that maps every word within a given protocol
+ * structure to the real byte offset as per the specification of that
+ * protocol header.
+ * e.g. the dst address is 3 words in the Ethernet header, at byte offsets
+ * 0, 2, and 4 in the actual packet header, and the src address is at 6, 8, 10
+ */
+struct ice_prot_ext_tbl_entry {
+ enum ice_protocol_type prot_type;
+ /* Byte offset into header of given protocol type */
+ u8 offs[sizeof(union ice_prot_hdr)];
+};
+
+/* Extractions to be looked up for a given recipe */
+struct ice_prot_lkup_ext {
+ u16 prot_type;
+ u8 n_val_words;
+ /* create a buffer to hold max words per recipe */
+ u16 field_off[ICE_MAX_CHAIN_WORDS];
+ u16 field_mask[ICE_MAX_CHAIN_WORDS];
+
+ struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
+
+ /* Indicate field offsets that have field vector indices assigned */
+ DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
+};
+
+struct ice_pref_recipe_group {
+ u8 n_val_pairs; /* Number of valid pairs */
+ struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
+ u16 mask[ICE_NUM_WORDS_RECIPE];
+};
+
+struct ice_recp_grp_entry {
+ struct list_head l_entry;
+
+#define ICE_INVAL_CHAIN_IND 0xFF
+ u16 rid;
+ u8 chain_idx;
+ u16 fv_idx[ICE_NUM_WORDS_RECIPE];
+ u16 fv_mask[ICE_NUM_WORDS_RECIPE];
+ struct ice_pref_recipe_group r_group;
+};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 80380aed8882..a1be0d04a2d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -6,6 +6,252 @@
#define E810_OUT_PROP_DELAY_NS 1
+static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ /* name idx func chan */
+ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
+ { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
+ { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
+ { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
+ { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+};
+
+/**
+ * ice_get_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Read the configuration of the SMA control logic and put it into the
+ * ptp_pin_desc structure
+ */
+static int
+ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
+{
+ u8 data, i;
+ int status;
+
+ /* Read initial pin state */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* initialize with defaults */
+ for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
+ snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
+ "%s", ice_pin_desc_e810t[i].name);
+ ptp_pins[i].index = ice_pin_desc_e810t[i].index;
+ ptp_pins[i].func = ice_pin_desc_e810t[i].func;
+ ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
+ }
+
+ /* Parse SMA1/UFL1 */
+ switch (data & ICE_SMA1_MASK_E810T) {
+ case ICE_SMA1_MASK_E810T:
+ default:
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_DIR_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_PEROUT;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_TX_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case 0:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_PEROUT;
+ break;
+ }
+
+ /* Parse SMA2/UFL2 */
+ switch (data & ICE_SMA2_MASK_E810T) {
+ case ICE_SMA2_MASK_E810T:
+ default:
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_EXTTS;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ case ICE_SMA2_DIR_EN_E810T:
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Set the configuration of the SMA control logic based on the configuration
+ * in the ptp_pins parameter
+ */
+static int
+ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
+ const struct ptp_pin_desc *ptp_pins)
+{
+ int status;
+ u8 data;
+
+ /* SMA1 and UFL1 cannot be set to TX at the same time */
+ if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT)
+ return -EINVAL;
+
+ /* SMA2 and UFL2 cannot be set to RX at the same time */
+ if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS)
+ return -EINVAL;
+
+ /* Read initial pin state value */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* Set the right state based on the desired configuration */
+ data &= ~ICE_SMA1_MASK_E810T;
+ if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
+ data |= ICE_SMA1_MASK_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX");
+ data |= ICE_SMA1_TX_EN_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ /* U.FL 1 TX will always enable SMA 1 RX */
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 TX");
+ data |= ICE_SMA1_DIR_EN_E810T;
+ }
+
+ data &= ~ICE_SMA2_MASK_E810T;
+ if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
+ data |= ICE_SMA2_MASK_E810T;
+ } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 RX");
+ data |= (ICE_SMA2_TX_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "UFL2 RX");
+ data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX");
+ data |= (ICE_SMA2_DIR_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
+ data |= ICE_SMA2_DIR_EN_E810T;
+ }
+
+ return ice_write_sma_ctrl_e810t(hw, data);
+}
+
+/**
+ * ice_ptp_set_sma_e810t
+ * @info: the driver's PTP info structure
+ * @pin: pin index in kernel structure
+ * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
+ *
+ * Set the configuration of a single SMA pin
+ */
+static int
+ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func)
+{
+ struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (pin < SMA1 || func > PTP_PF_PEROUT)
+ return -EOPNOTSUPP;
+
+ err = ice_get_sma_config_e810t(hw, ptp_pins);
+ if (err)
+ return err;
+
+ /* Disable the same function on the other pin sharing the channel */
+ if (pin == SMA1 && ptp_pins[UFL1].func == func)
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ if (pin == UFL1 && ptp_pins[SMA1].func == func)
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+
+ if (pin == SMA2 && ptp_pins[UFL2].func == func)
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ if (pin == UFL2 && ptp_pins[SMA2].func == func)
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+
+ /* Set up new pin function in the temp table */
+ ptp_pins[pin].func = func;
+
+ return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+}
+
+/**
+ * ice_verify_pin_e810t
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Verify that the pin supports the requested function and that the pins are
+ * configured consistently, then reconfigure the SMA logic attached to the
+ * given pin to enable its desired functionality.
+ */
+static int
+ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* Don't allow channel reassignment */
+ if (chan != ice_pin_desc_e810t[pin].chan)
+ return -EOPNOTSUPP;
+
+ /* Check if functions are properly assigned */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (pin == UFL1)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin == UFL2 || pin == GNSS)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+
+ return ice_ptp_set_sma_e810t(info, pin, func);
+}
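For context, these verify/enable callbacks are reached from userspace through the standard PTP character device. A minimal, hedged userspace sketch follows; the /dev/ptp0 path is system-specific, and the pin index assumes SMA1 = 1 per the E810-T pin enum added in ice_ptp.h by this patch:

	/* Userspace sketch: request periodic output on pin index 1 using the
	 * standard PTP_PIN_SETFUNC ioctl from linux/ptp_clock.h.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_pin_desc desc;
		int fd = open("/dev/ptp0", O_RDWR);	/* clock index varies */

		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&desc, 0, sizeof(desc));
		desc.index = 1;			/* SMA1 (assumed layout) */
		desc.func = PTP_PF_PEROUT;	/* periodic output */
		desc.chan = 0;

		if (ioctl(fd, PTP_PIN_SETFUNC, &desc))
			perror("PTP_PIN_SETFUNC");
		return 0;
	}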
+
/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
@@ -735,17 +981,34 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_perout_channel clk_cfg = {0};
+ bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ sma_pres = true;
+
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
chan = rq->perout.index;
- if (chan == PPS_CLK_GEN_CHAN)
+ if (sma_pres) {
+ if (chan == ice_pin_desc_e810t[SMA1].chan)
+ clk_cfg.gpio_pin = GPIO_20;
+ else if (chan == ice_pin_desc_e810t[SMA2].chan)
+ clk_cfg.gpio_pin = GPIO_22;
+ else
+ return -EINVAL;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ clk_cfg.gpio_pin = GPIO_20;
+ else
+ clk_cfg.gpio_pin = GPIO_22;
+ } else if (chan == PPS_CLK_GEN_CHAN) {
clk_cfg.gpio_pin = PPS_PIN_INDEX;
- else
+ } else {
clk_cfg.gpio_pin = chan;
+ }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
@@ -757,7 +1020,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
break;
case PTP_CLK_REQ_EXTTS:
chan = rq->extts.index;
- gpio_pin = chan;
+ if (sma_pres) {
+ if (chan < ice_pin_desc_e810t[SMA2].chan)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else {
+ gpio_pin = chan;
+ }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
rq->extts.flags);
@@ -1012,7 +1287,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* The timestamp is in ns, so we must convert the result first.
*/
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u32 ts_high;
@@ -1038,13 +1313,93 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
}
/**
+ * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Disable the OS access to the SMA pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the E810-T SMA control
+ * register.
+ */
+static void
+ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+
+ info->enable = NULL;
+ info->verify = NULL;
+ info->n_pins = 0;
+ info->n_ext_ts = 0;
+ info->n_per_out = 0;
+}
+
+/**
+ * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Finish setting up the SMA pins by allocating pin_config, and setting it up
+ * according to the current status of the SMA. On failure, disable all of the
+ * extended SMA pin support.
+ */
+static void
+ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Allocate memory for kernel pins interface */
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config), GFP_KERNEL);
+ if (!info->pin_config) {
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ return;
+ }
+
+ /* Read current SMA status */
+ err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
+ if (err)
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+}
+
+/**
+ * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+static void
+ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ /* Check if SMA controller is in the netlist */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
+ !ice_is_pca9575_present(&pf->hw))
+ ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
+ info->n_per_out = N_PER_OUT_E810T_NO_SMA;
+ return;
+ }
+
+ info->n_per_out = N_PER_OUT_E810T;
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+}
+
+/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @info: PTP clock capabilities
*/
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
- info->n_per_out = E810_N_PER_OUT;
- info->n_ext_ts = E810_N_EXT_TS;
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
/**
@@ -1062,7 +1417,10 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info);
+ if (ice_is_e810t(&pf->hw))
+ ice_ptp_setup_pins_e810t(pf, info);
+ else
+ ice_ptp_setup_pins_e810(info);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index e1c787bd5b96..f71ad317d6c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -9,12 +9,21 @@
#include "ice_ptp_hw.h"
-enum ice_ptp_pin {
+enum ice_ptp_pin_e810 {
GPIO_20 = 0,
GPIO_21,
GPIO_22,
GPIO_23,
- NUM_ICE_PTP_PIN
+ NUM_PTP_PIN_E810
+};
+
+enum ice_ptp_pin_e810t {
+ GNSS = 0,
+ SMA1,
+ UFL1,
+ SMA2,
+ UFL2,
+ NUM_PTP_PINS_E810T
};
struct ice_perout_channel {
@@ -155,8 +164,11 @@ struct ice_ptp {
#define PPS_CLK_SRC_CHAN 2
#define PPS_PIN_INDEX 5
#define TIME_SYNC_PIN_INDEX 4
-#define E810_N_EXT_TS 3
-#define E810_N_PER_OUT 4
+#define N_EXT_TS_E810 3
+#define N_PER_OUT_E810 4
+#define N_PER_OUT_E810T 3
+#define N_PER_OUT_E810T_NO_SMA 2
+#define N_EXT_TS_E810_NO_SMA 2
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
@@ -168,7 +180,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_process_ts(struct ice_pf *pf);
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
@@ -196,7 +208,7 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 3eca0e4eab0b..29f947c0cd2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -649,3 +649,154 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
return ice_clear_phy_tstamp_e810(hw, block, idx);
}
+
+/* E810T SMA functions
+ *
+ * The following functions operate specifically on E810T hardware and are used
+ * to access the extended GPIOs available.
+ */
+
+/**
+ * ice_get_pca9575_handle
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the hw structure and subsequent calls
+ * return the cached value.
+ */
+static int
+ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+ /* If handle was not detected read it from the netlist */
+ cmd = &desc.params.get_link_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ /* Set node type to GPIO controller */
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -EOPNOTSUPP;
+
+ cmd->addr.topo_params.index = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status)
+ return -EOPNOTSUPP;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -EOPNOTSUPP;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
+ * PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ *data = 0;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ &pin, NULL);
+ if (status)
+ break;
+ *data |= (u8)(!pin) << i;
+ }
+
+ return status;
+}
+
+/**
+ * ice_write_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
+ * of the PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ pin = !(data & (1 << i));
+ status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ pin, NULL);
+ if (status)
+ break;
+ }
+
+ return status;
+}
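Both helpers above treat the expander signals as active low (hence the inversion of "pin" on read and write) and reach Port 1 by offsetting the bit index. A hedged illustration of the mapping (helper name is hypothetical):

	/* Illustrative only: SMA control bit i (3..7 per the comments above)
	 * lives at PCA9575 GPIO number i + ICE_PCA9575_P1_OFFSET, i.e.
	 * expander pins 11..15 on Port 1.
	 */
	static u16 ice_sma_bit_to_gpio(u8 bit)
	{
		return bit + ICE_PCA9575_P1_OFFSET;	/* e.g. bit 3 -> GPIO 11 */
	}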
+
+/**
+ * ice_is_pca9575_present
+ * @hw: pointer to the hw struct
+ *
+ * Check if the SW IO expander is present in the netlist
+ */
+bool ice_is_pca9575_present(struct ice_hw *hw)
+{
+ u16 handle = 0;
+ int status;
+
+ if (!ice_is_e810t(hw))
+ return false;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+
+ return !status && handle;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 55a414e87018..b2984b5c22c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -30,6 +30,9 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
@@ -76,4 +79,23 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
#define LOW_TX_MEMORY_BANK_START 0x03090000
#define HIGH_TX_MEMORY_BANK_START 0x03090004
+/* E810T SMA controller pin control */
+#define ICE_SMA1_DIR_EN_E810T BIT(4)
+#define ICE_SMA1_TX_EN_E810T BIT(5)
+#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3)
+#define ICE_SMA2_DIR_EN_E810T BIT(6)
+#define ICE_SMA2_TX_EN_E810T BIT(7)
+
+#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \
+ ICE_SMA1_TX_EN_E810T)
+#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \
+ ICE_SMA2_DIR_EN_E810T | \
+ ICE_SMA2_TX_EN_E810T)
+#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \
+ ICE_SMA2_MASK_E810T)
+
+#define ICE_SMA_MIN_BIT_E810T 3
+#define ICE_SMA_MAX_BIT_E810T 7
+#define ICE_PCA9575_P1_OFFSET 8
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
new file mode 100644
index 000000000000..c49eeea7cb67
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_eswitch.h"
+#include "ice_devlink.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_repr_get_sw_port_id - get port ID associated with representor
+ * @repr: pointer to port representor
+ */
+static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+{
+ return repr->vf->pf->hw.port_info->lport;
+}
+
+/**
+ * ice_repr_get_phys_port_name - get phys port name
+ * @netdev: pointer to port representor netdev
+ * @buf: buffer into which the port name is written
+ * @len: max length of buf
+ */
+static int
+ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
+ int res;
+
+ /* Devlink port is registered and devlink core is taking care of name formatting. */
+ if (repr->vf->devlink_port.devlink)
+ return -EOPNOTSUPP;
+
+ res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
+ repr->vf->vf_id);
+ if (res <= 0)
+ return -EOPNOTSUPP;
+ return 0;
+}
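For example, given the "pf%dvfr%d" format above, a representor for VF 3 on a PF whose logical port is 0 is named "pf0vfr3".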
+
+/**
+ * ice_repr_get_stats64 - get VF stats for VFPR use
+ * @netdev: pointer to port representor netdev
+ * @stats: pointer to struct where stats can be stored
+ */
+static void
+ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_eth_stats *eth_stats;
+ struct ice_vsi *vsi;
+
+ if (ice_is_vf_disabled(np->repr->vf))
+ return;
+ vsi = np->repr->src_vsi;
+
+ ice_update_vsi_stats(vsi);
+ eth_stats = &vsi->eth_stats;
+
+ stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
+ eth_stats->tx_multicast;
+ stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
+ eth_stats->rx_multicast;
+ stats->tx_bytes = eth_stats->tx_bytes;
+ stats->rx_bytes = eth_stats->rx_bytes;
+ stats->multicast = eth_stats->rx_multicast;
+ stats->tx_errors = eth_stats->tx_errors;
+ stats->tx_dropped = eth_stats->tx_discards;
+ stats->rx_dropped = eth_stats->rx_discards;
+}
+
+/**
+ * ice_netdev_to_repr - Get port representor for given netdevice
+ * @netdev: pointer to port representor netdev
+ */
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ return np->repr;
+}
+
+/**
+ * ice_repr_open - Enable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a port representor's network
+ * interface is made active by the system (IFF_UP). Corresponding
+ * VF is notified about link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_open(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = true;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
+/**
+ * ice_repr_stop - Disable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The stop entry point is called when a port representor's network
+ * interface is de-activated by the system. Corresponding
+ * VF is notified about link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_stop(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = false;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static struct devlink_port *
+ice_repr_get_devlink_port(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ return &repr->vf->devlink_port;
+}
+
+static int
+ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
+ struct flow_cls_offload *flower)
+{
+ switch (flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(repr->src_vsi, flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
+ struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_repr_setup_tc_cls_flower(np->repr, flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(ice_repr_block_cb_list);
+
+static int
+ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple((struct flow_block_offload *)
+ type_data,
+ &ice_repr_block_cb_list,
+ ice_repr_setup_tc_block_cb,
+ np, np, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops ice_repr_netdev_ops = {
+ .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_open,
+ .ndo_stop = ice_repr_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_get_devlink_port = ice_repr_get_devlink_port,
+ .ndo_setup_tc = ice_repr_setup_tc,
+};
+
+/**
+ * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
+ * @netdev: pointer to netdev
+ */
+bool ice_is_port_repr_netdev(struct net_device *netdev)
+{
+ return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+}
+
+/**
+ * ice_repr_reg_netdev - register port representor netdev
+ * @netdev: pointer to port representor netdev
+ */
+static int
+ice_repr_reg_netdev(struct net_device *netdev)
+{
+ eth_hw_addr_random(netdev);
+ netdev->netdev_ops = &ice_repr_netdev_ops;
+ ice_set_ethtool_repr_ops(netdev);
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return register_netdev(netdev);
+}
+
+/**
+ * ice_repr_add - add representor for VF
+ * @vf: pointer to VF structure
+ */
+static int ice_repr_add(struct ice_vf *vf)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ int err;
+
+ repr = kzalloc(sizeof(*repr), GFP_KERNEL);
+ if (!repr)
+ return -ENOMEM;
+
+ repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
+ if (!repr->netdev) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ repr->src_vsi = ice_get_vf_vsi(vf);
+ repr->vf = vf;
+ vf->repr = repr;
+ np = netdev_priv(repr->netdev);
+ np->repr = repr;
+
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector) {
+ err = -ENOMEM;
+ goto err_alloc_q_vector;
+ }
+ repr->q_vector = q_vector;
+
+ err = ice_devlink_create_vf_port(vf);
+ if (err)
+ goto err_devlink;
+
+ err = ice_repr_reg_netdev(repr->netdev);
+ if (err)
+ goto err_netdev;
+
+ devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_vf_port(vf);
+err_devlink:
+ kfree(repr->q_vector);
+ vf->repr->q_vector = NULL;
+err_alloc_q_vector:
+ free_netdev(repr->netdev);
+ repr->netdev = NULL;
+err_alloc:
+ kfree(repr);
+ vf->repr = NULL;
+ return err;
+}
+
+/**
+ * ice_repr_rem - remove representor from VF
+ * @vf: pointer to VF structure
+ */
+static void ice_repr_rem(struct ice_vf *vf)
+{
+ ice_devlink_destroy_vf_port(vf);
+ kfree(vf->repr->q_vector);
+ vf->repr->q_vector = NULL;
+ unregister_netdev(vf->repr->netdev);
+ free_netdev(vf->repr->netdev);
+ vf->repr->netdev = NULL;
+ kfree(vf->repr);
+ vf->repr = NULL;
+}
+
+/**
+ * ice_repr_add_for_all_vfs - add port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+int ice_repr_add_for_all_vfs(struct ice_pf *pf)
+{
+ int err;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ err = ice_repr_add(vf);
+ if (err)
+ goto err;
+
+ ice_vc_change_ops_to_repr(&vf->vc_ops);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+
+ return err;
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+}
+
+/**
+ * ice_repr_start_tx_queues - start Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_start_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_on(repr->netdev);
+ netif_tx_start_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_stop_tx_queues - stop Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_stop_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_off(repr->netdev);
+ netif_tx_stop_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_set_traffic_vsi - set traffic VSI for port representor
+ * @repr: repr on which VSI will be set
+ * @vsi: pointer to VSI that will be used by port representor to pass traffic
+ */
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np = netdev_priv(repr->netdev);
+
+ np->vsi = vsi;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
new file mode 100644
index 000000000000..806de22933c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_REPR_H_
+#define _ICE_REPR_H_
+
+#include <net/dst_metadata.h>
+#include "ice.h"
+
+struct ice_repr {
+ struct ice_vsi *src_vsi;
+ struct ice_vf *vf;
+ struct ice_q_vector *q_vector;
+ struct net_device *netdev;
+ struct metadata_dst *dst;
+};
+
+int ice_repr_add_for_all_vfs(struct ice_pf *pf);
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+
+void ice_repr_start_tx_queues(struct ice_repr *repr);
+void ice_repr_stop_tx_queues(struct ice_repr *repr);
+
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
+
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+bool ice_is_port_repr_netdev(struct net_device *netdev);
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2d9b10277186..ce3c7bded4cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -3012,6 +3012,43 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
}
/**
+ * ice_sched_save_vsi_bw - save VSI node's BW information
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps (kilobits per second)
+ *
+ * Save BW information of VSI type node for post replay use.
+ */
+static int
+ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return -EINVAL;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return -EINVAL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
* @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
@@ -3784,6 +3821,153 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
}
/**
+ * ice_sched_get_node_by_id_type - get node from ID type
+ * @pi: port information structure
+ * @id: identifier
+ * @agg_type: type of aggregator
+ * @tc: traffic class
+ *
+ * This function returns the node identified by the given ID and aggregator
+ * type for the given traffic class (TC). It needs to be called with
+ * the scheduler lock held.
+ */
+static struct ice_sched_node *
+ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc)
+{
+ struct ice_sched_node *node = NULL;
+
+ switch (agg_type) {
+ case ICE_AGG_TYPE_VSI: {
+ struct ice_vsi_ctx *vsi_ctx;
+ u16 vsi_handle = (u16)id;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ break;
+ /* Get sched_vsi_info */
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ break;
+ node = vsi_ctx->sched.vsi_node[tc];
+ break;
+ }
+
+ case ICE_AGG_TYPE_AGG: {
+ struct ice_sched_node *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (tc_node)
+ node = ice_sched_get_agg_node(pi, tc_node, id);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
+ * @pi: port information structure
+ * @id: ID (software VSI handle or AGG ID)
+ * @agg_type: aggregator type (VSI or AGG type node)
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets the BW limit of a VSI or aggregator scheduling node
+ * for the given TC, using the BW value passed in as an argument.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+
+ if (!pi)
+ return status;
+
+ if (rl_type == ICE_UNKNOWN_BW)
+ return status;
+
+ mutex_lock(&pi->sched_lock);
+ node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
+ goto exit_set_node_bw_lmt_per_tc;
+ }
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+exit_set_node_bw_lmt_per_tc:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures BW limit of VSI scheduling node based on TC
+ * information.
+ */
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type, bw);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ *
+ * This function configures default BW limit of VSI scheduling node based on TC
+ * information.
+ */
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
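Taken together, the two exported helpers above give callers a set/restore pair. A hedged caller sketch (function name and the 100 Mbps value are placeholders for illustration):

	/* Illustrative only: cap TC 0 of a VSI at 100 Mbps on the max rate
	 * limiter, then restore the default (unlimited) profile later.
	 */
	static enum ice_status
	ice_demo_cap_tc0(struct ice_port_info *pi, u16 vsi_handle)
	{
		enum ice_status status;

		status = ice_cfg_vsi_bw_lmt_per_tc(pi, vsi_handle, 0,
						   ICE_MAX_BW,
						   100000 /* Kbps */);
		if (status)
			return status;

		/* ... later, remove the cap */
		return ice_cfg_vsi_bw_dflt_lmt_per_tc(pi, vsi_handle, 0,
						      ICE_MAX_BW);
	}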
+/**
* ice_cfg_rl_burst_size - Set burst size value
* @hw: pointer to the HW struct
* @bytes: burst size in bytes
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index fdf7a5882f07..6bddcbecaf5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -58,6 +58,8 @@ struct ice_sched_agg_info {
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
+ /* bw_t_info saves aggregator BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
/* save aggregator TC bitmap */
DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
@@ -104,6 +106,12 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 3b6c1420aa7b..2742e1c1e337 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -8,6 +8,7 @@
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
+#define ICE_IPV6_ETHER_ID 0x86DD
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
* struct to configure any switch filter rules.
@@ -29,6 +30,290 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+struct ice_dummy_pkt_offsets {
+ enum ice_protocol_type type;
+ u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
+};
+
+/* offset info for MAC + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + UDP */
+static const u8 dummy_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_UDP_ILOS, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:UDP dummy packet */
+static const u8 dummy_vlan_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_TCP_IL, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + TCP */
+static const u8 dummy_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_TCP_IL, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:TCP dummy packet */
+static const u8 dummy_vlan_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_TCP_IL, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + TCP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_TCP_IL, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + TCP dummy packet */
+static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* IPv6 + UDP dummy packet */
+static const u8 dummy_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x10, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_UDP_ILOS, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + UDP dummy packet */
+static const u8 dummy_vlan_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
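The offset tables above exist so that rule-building code can patch match data into the right place in a dummy packet template. A hedged sketch of that pattern; ice_fill_hdr() and its parameters are hypothetical, for illustration only:

	/* Illustrative only: walk the offset list to find where a protocol
	 * header begins, then copy the match data over the template there.
	 */
	static void
	ice_fill_hdr(u8 *pkt, const struct ice_dummy_pkt_offsets *offsets,
		     enum ice_protocol_type type, const u8 *hdr, u16 hdr_len)
	{
		u16 i;

		for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++)
			if (offsets[i].type == type) {
				memcpy(pkt + offsets[i].offset, hdr, hdr_len);
				return;
			}
	}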
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -42,6 +327,14 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+/* this is a recipe to profile association bitmap */
+static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
+ ICE_MAX_NUM_PROFILES);
+
+/* this is a profile to recipe association bitmap */
+static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
+ ICE_MAX_NUM_RECIPES);
+
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@@ -59,10 +352,11 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
if (!recps)
return ICE_ERR_NO_MEMORY;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
+ INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -518,7 +812,7 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-static enum ice_status
+enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
@@ -543,6 +837,358 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return status;
}
+/**
+ * ice_aq_add_recipe - add switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: number of switch recipes in the list
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add(0x0290)
+ */
+static enum ice_status
+ice_aq_add_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 num_recipes, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ u16 buf_size;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
+
+ cmd->num_sub_recipes = cpu_to_le16(num_recipes);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ buf_size = num_recipes * sizeof(*s_recipe_list);
+
+ return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_recipe - get switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: pointer to the number of recipes (input and output)
+ * @recipe_root: root recipe number of recipe(s) to retrieve
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get(0x0292)
+ *
+ * On input, *num_recipes should equal the number of entries in s_recipe_list.
+ * On output, *num_recipes will equal the number of entries returned in
+ * s_recipe_list.
+ *
+ * The caller must supply enough space in s_recipe_list to hold all possible
+ * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
+ */
+static enum ice_status
+ice_aq_get_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 buf_size;
+
+ if (*num_recipes != ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
+
+ cmd->return_index = cpu_to_le16(recipe_root);
+ cmd->num_sub_recipes = 0;
+
+ buf_size = *num_recipes * sizeof(*s_recipe_list);
+
+ status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+ *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
+
+ return status;
+}
+
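A hedged usage sketch of the in/out *num_recipes contract documented above; ice_count_sub_recipes() is hypothetical, and the pattern mirrors ice_get_recp_frm_fw() later in this patch:

	/* Illustrative only: read back a recipe chain and report how many
	 * sub-recipe entries firmware returned.
	 */
	static enum ice_status
	ice_count_sub_recipes(struct ice_hw *hw, u16 rid, u16 *count)
	{
		struct ice_aqc_recipe_data_elem *buf;
		u16 cnt = ICE_MAX_NUM_RECIPES;	/* in: buffer capacity */
		enum ice_status status;

		buf = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*buf), GFP_KERNEL);
		if (!buf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_recipe(hw, buf, &cnt, rid, NULL);
		if (!status)
			*count = cnt;	/* out: entries actually returned */

		kfree(buf);
		return status;
	}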
+/**
+ * ice_aq_map_recipe_to_profile - Map recipe to packet profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID to associate the recipe with
+ * @r_bitmap: recipe bitmap to associate with the profile
+ * @cd: pointer to command details structure or NULL
+ * Recipe to profile association (0x0291)
+ */
+static enum ice_status
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+ /* Set the recipe ID bit in the bitmask to let the device know which
+ * profile we are associating the recipe to
+ */
+ memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_get_recipe_to_profile - Get recipe to profile associations
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID whose recipe associations are queried
+ * @r_bitmap: recipe bitmap filled in from the response
+ * @cd: pointer to command details structure or NULL
+ * Get the recipes associated with the given profile ID (0x0293)
+ */
+static enum ice_status
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status)
+ memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+
+ return status;
+}
+
+/**
+ * ice_alloc_recipe - add recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID returned as response to AQ call
+ */
+static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(sw_buf, elem, 1);
+ sw_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ sw_buf->num_elems = cpu_to_le16(1);
+ sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
+ ICE_AQC_RES_TYPE_S) |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (!status)
+ *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ kfree(sw_buf);
+
+ return status;
+}
+
+/**
+ * ice_get_recp_to_prof_map - updates recipe to profile mapping
+ * @hw: pointer to hardware structure
+ *
+ * This function populates the recipe_to_profile matrix, where the index into
+ * the array is the recipe ID and each element records which profiles that
+ * recipe is mapped to.
+ */
+static void ice_get_recp_to_prof_map(struct ice_hw *hw)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 i;
+
+ for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
+ u16 j;
+
+ bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
+ bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
+ if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ continue;
+ bitmap_copy(profile_to_recipe[i], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit(i, recipe_to_profile[j]);
+ }
+}
+
+/**
+ * ice_collect_result_idx - copy result index values
+ * @buf: buffer that contains the result index
+ * @recp: the recipe struct to copy data into
+ */
+static void
+ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
+ struct ice_sw_recipe *recp)
+{
+ if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ recp->res_idxs);
+}
+
+/**
+ * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
+ * @hw: pointer to hardware structure
+ * @recps: struct that we need to populate
+ * @rid: recipe ID that we are populating
+ * @refresh_required: true if we should get recipe to profile mapping from FW
+ *
+ * This function is used to populate all the necessary entries into our
+ * bookkeeping so that we have a current list of all the recipes that are
+ * programmed in the firmware.
+ */
+static enum ice_status
+ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
+ bool *refresh_required)
+{
+ DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ struct ice_prot_lkup_ext *lkup_exts;
+ enum ice_status status;
+ u8 fv_word_idx = 0;
+ u16 sub_recps;
+
+ bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
+
+ /* we need a buffer big enough to accommodate all the recipes */
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp[0].recipe_indx = rid;
+ status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
+ /* non-zero status meaning recipe doesn't exist */
+ if (status)
+ goto err_unroll;
+
+ /* Get recipe to profile map so that we can get the fv from lkups that
+ * we read for a recipe from FW. Since we want to minimize the number of
+ * times we make this FW call, just make one call and cache the copy
+ * until a new recipe is added. This operation is only required the
+ * first time to get the changes from FW. Then to search existing
+ * entries we don't need to update the cache again until another recipe
+ * gets added.
+ */
+ if (*refresh_required) {
+ ice_get_recp_to_prof_map(hw);
+ *refresh_required = false;
+ }
+
+ /* Start populating all the entries for recps[rid] based on lkups from
+ * firmware. Note that we are only creating the root recipe in our
+ * database.
+ */
+ lkup_exts = &recps[rid].lkup_exts;
+
+ for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
+ struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
+ struct ice_recp_grp_entry *rg_entry;
+ u8 i, prof, idx, prot = 0;
+ bool is_root;
+ u16 off = 0;
+
+ rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
+ GFP_KERNEL);
+ if (!rg_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+
+ idx = root_bufs.recipe_indx;
+ is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
+
+ /* Mark all result indices in this chain */
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ result_bm);
+
+ /* get the first profile that is associated with rid */
+ prof = find_first_bit(recipe_to_profile[idx],
+ ICE_MAX_NUM_PROFILES);
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
+
+ rg_entry->fv_idx[i] = lkup_indx;
+ rg_entry->fv_mask[i] =
+ le16_to_cpu(root_bufs.content.mask[i + 1]);
+
+ /* If the recipe is a chained recipe then each of its
+ * child recipes' results will have a result index.
+ * To fill fv_words we should not use those result
+ * indices; we only need the protocol IDs and offsets.
+ * We skip any fv_idx that stores a result index, and
+ * also any fv_idx holding ICE_AQ_RECIPE_LKUP_IGNORE
+ * or 0, since those aren't valid offset values.
+ */
+ if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
+ rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
+ rg_entry->fv_idx[i] == 0)
+ continue;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, prof,
+ rg_entry->fv_idx[i], &prot, &off);
+ lkup_exts->fv_words[fv_word_idx].prot_id = prot;
+ lkup_exts->fv_words[fv_word_idx].off = off;
+ lkup_exts->field_mask[fv_word_idx] =
+ rg_entry->fv_mask[i];
+ fv_word_idx++;
+ }
+ /* populate rg_list with the data from the child entry of this
+ * recipe
+ */
+ list_add(&rg_entry->l_entry, &recps[rid].rg_list);
+
+ /* Propagate some data to the recipe database */
+ recps[idx].is_root = !!is_root;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
+ recps[idx].chain_idx = root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
+ } else {
+ recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ }
+
+ if (!is_root)
+ continue;
+
+ /* Only do the following for root recipes entries */
+ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
+ sizeof(recps[idx].r_bitmap));
+ recps[idx].root_rid = root_bufs.content.rid &
+ ~ICE_AQ_RECIPE_ID_IS_ROOT;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ }
+
+ /* Complete initialization of the root recipe entry */
+ lkup_exts->n_val_words = fv_word_idx;
+ recps[rid].big_recp = (num_recps > 1);
+ recps[rid].n_grp_count = (u8)num_recps;
+ recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
+ recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
+ GFP_KERNEL);
+ if (!recps[rid].root_buf)
+ goto err_unroll;
+
+ /* Copy result indexes */
+ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
+ recps[rid].recp_created = true;
+
+err_unroll:
+ kfree(tmp);
+ return status;
+}
+
/* ice_init_port_info - Initialize port_info with switch configuration data
* @pi: pointer to port_info
* @vsi_port_num: VSI number or port number
@@ -1627,6 +2273,125 @@ exit:
}
/**
+ * ice_mac_fltr_exist - does this MAC filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @mac: MAC address to be checked (for MAC filter)
+ * @vsi_handle: check MAC filter for this VSI
+ */
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
+
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_MAC ||
+ f_info->fltr_act != ICE_FWD_TO_VSI ||
+ hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+
+ if (ether_addr_equal(mac, mac_addr)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+ return false;
+}
+
+/**
+ * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @vlan_id: VLAN ID
+ * @vsi_handle: check VLAN filter for this VSI
+ */
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (vlan_id > ICE_MAX_VLAN_ID)
+ return false;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
+ struct ice_vsi_list_map_info *map_info;
+
+ if (entry_vlan_id > ICE_MAX_VLAN_ID)
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ continue;
+
+ /* The only allowed filter actions are FWD_TO_VSI and FWD_TO_VSI_LIST */
+ if (f_info->fltr_act != ICE_FWD_TO_VSI &&
+ f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
+ continue;
+
+ if (f_info->fltr_act == ICE_FWD_TO_VSI) {
+ if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+ } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
+ /* If filter_action is FWD_TO_VSI_LIST, make sure
+ * the VSI being checked is part of the VSI list
+ */
+ if (entry->vsi_count == 1 &&
+ entry->vsi_list_info) {
+ map_info = entry->vsi_list_info;
+ if (!test_bit(vsi_handle, map_info->vsi_map))
+ continue;
+ }
+ }
+
+ if (vlan_id == entry_vlan_id) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ return false;
+}
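These existence checks let callers make filter programming idempotent. A hedged fragment of that pattern; ice_add_vlan_filter() is a hypothetical helper used purely for illustration:

	/* Illustrative only: program a VLAN filter only when it is not
	 * already present for the VSI.
	 */
	if (!ice_vlan_fltr_exist(hw, vid, vsi_handle))
		err = ice_add_vlan_filter(hw, vid, vsi_handle);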
+
+/**
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -2037,6 +2802,27 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
}
/**
+ * ice_rem_adv_rule_info
+ * @hw: pointer to the hardware structure
+ * @rule_head: pointer to the switch list structure that we want to delete
+ */
+static void
+ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
+{
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ if (list_empty(rule_head))
+ return;
+
+ list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+}
+
+/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to set as default
@@ -2773,6 +3559,1459 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
return status;
}
+/* This mapping table maps every word within a given protocol structure to
+ * the real byte offset as per the specification of that protocol header.
+ * For example, the dst address in the Ethernet header is 3 words and the
+ * corresponding bytes are 0, 2, 4 in the actual packet header, while the
+ * src address is at bytes 6, 8, 10.
+ * IMPORTANT: Every structure that is part of the "ice_prot_hdr" union should
+ * have a matching entry describing its fields. This needs to be updated if a
+ * new structure is added to that union.
+ */
+static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_ETYPE_OL, { 0 } },
+ { ICE_VLAN_OFOS, { 2, 0 } },
+ { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_TCP_IL, { 0, 2 } },
+ { ICE_UDP_OF, { 0, 2 } },
+ { ICE_UDP_ILOS, { 0, 2 } },
+};
+
+static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
+ { ICE_MAC_IL, ICE_MAC_IL_HW },
+ { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
+ { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
+ { ICE_IPV4_IL, ICE_IPV4_IL_HW },
+ { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
+ { ICE_IPV6_IL, ICE_IPV6_IL_HW },
+ { ICE_TCP_IL, ICE_TCP_IL_HW },
+ { ICE_UDP_OF, ICE_UDP_OF_HW },
+ { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
+};
+
+/**
+ * ice_find_recp - find a recipe
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: extension sequence to match
+ *
+ * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
+ */
+static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+{
+ bool refresh_required = true;
+ struct ice_sw_recipe *recp;
+ u8 i;
+
+ /* Walk through existing recipes to find a match */
+ recp = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ /* If a recipe was not created for this ID in SW bookkeeping,
+ * check if FW has an entry for this recipe. If the FW has an
+ * entry, update our SW bookkeeping and continue with the
+ * matching.
+ */
+ if (!recp[i].recp_created)
+ if (ice_get_recp_frm_fw(hw,
+ hw->switch_info->recp_list, i,
+ &refresh_required))
+ continue;
+
+ /* Skip inverse action recipes */
+ if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_INV_ACT)
+ continue;
+
+ /* if the number of words we are looking for matches */
+ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
+ struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
+ struct ice_fv_word *be = lkup_exts->fv_words;
+ u16 *cr = recp[i].lkup_exts.field_mask;
+ u16 *de = lkup_exts->field_mask;
+ bool found = true;
+ u8 pe, qr;
+
+ /* ar, cr, and qr are related to the recipe words, while
+ * be, de, and pe are related to the lookup words
+ */
+ for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
+ for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
+ qr++) {
+ if (ar[qr].off == be[pe].off &&
+ ar[qr].prot_id == be[pe].prot_id &&
+ cr[qr] == de[pe])
+ /* Found the "pe"th word in the
+ * given recipe
+ */
+ break;
+ }
+ /* After walking through all the words in the
+ * "i"th recipe, if the "pe"th word was not found
+ * then this recipe is not what we are looking
+ * for. So break out from this loop and try the
+ * next recipe.
+ */
+ if (qr >= recp[i].lkup_exts.n_val_words) {
+ found = false;
+ break;
+ }
+ }
+ /* If for "i"th recipe the found was never set to false
+ * then it means we found our match
+ */
+ if (found)
+ return i; /* Return the recipe ID */
+ }
+ }
+ return ICE_MAX_NUM_RECIPES;
+}
+
+/**
+ * ice_prot_type_to_id - get protocol ID from protocol type
+ * @type: protocol type
+ * @id: pointer to variable that will receive the ID
+ *
+ * Returns true if found, false otherwise
+ */
+static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == type) {
+ *id = ice_prot_id_tbl[i].protocol_id;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ice_fill_valid_words - count valid words
+ * @rule: advanced rule with lookup information
+ * @lkup_exts: byte offset extractions of the words that are valid
+ *
+ * Calculate the valid words in a lookup rule using the mask value.
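+ *
+ * For example (illustrative): an ICE_VLAN_OFOS lookup whose mask has only
+ * its first 16-bit word set contributes one valid word at byte offset 2
+ * (see ice_prot_ext above), with field_mask taken from the caller's mask.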
+ */
+static u8
+ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ u8 j, word, prot_id, ret_val;
+
+ if (!ice_prot_type_to_id(rule->type, &prot_id))
+ return 0;
+
+ word = lkup_exts->n_val_words;
+
+ for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
+ if (((u16 *)&rule->m_u)[j] &&
+ rule->type < ARRAY_SIZE(ice_prot_ext)) {
+ /* No more space to accommodate */
+ if (word >= ICE_MAX_CHAIN_WORDS)
+ return 0;
+ lkup_exts->fv_words[word].off =
+ ice_prot_ext[rule->type].offs[j];
+ lkup_exts->fv_words[word].prot_id =
+ ice_prot_id_tbl[rule->type].protocol_id;
+ lkup_exts->field_mask[word] =
+ be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
+ word++;
+ }
+
+ ret_val = word - lkup_exts->n_val_words;
+ lkup_exts->n_val_words = word;
+
+ return ret_val;
+}
+
+/**
+ * ice_create_first_fit_recp_def - Create a recipe grouping
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: an array of protocol header extractions
+ * @rg_list: pointer to a list that stores new recipe groups
+ * @recp_cnt: pointer to a variable that stores the returned number of recipe groups
+ *
+ * Using first fit algorithm, take all the words that are still not done
+ * and start grouping them in 4-word groups. Each group makes up one
+ * recipe.
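+ *
+ * For example (illustrative): six not-done words yield two groups, one with
+ * four words and one with two, so *recp_cnt is set to 2.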
+ */
+static enum ice_status
+ice_create_first_fit_recp_def(struct ice_hw *hw,
+ struct ice_prot_lkup_ext *lkup_exts,
+ struct list_head *rg_list,
+ u8 *recp_cnt)
+{
+ struct ice_pref_recipe_group *grp = NULL;
+ u8 j;
+
+ *recp_cnt = 0;
+
+ /* Walk through every word in the rule and check whether it is done. If
+ * not, the word needs to become part of a new recipe group.
+ */
+ for (j = 0; j < lkup_exts->n_val_words; j++)
+ if (!test_bit(j, lkup_exts->done)) {
+ if (!grp ||
+ grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
+ struct ice_recp_grp_entry *entry;
+
+ entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry)
+ return ICE_ERR_NO_MEMORY;
+ list_add(&entry->l_entry, rg_list);
+ grp = &entry->r_group;
+ (*recp_cnt)++;
+ }
+
+ grp->pairs[grp->n_val_pairs].prot_id =
+ lkup_exts->fv_words[j].prot_id;
+ grp->pairs[grp->n_val_pairs].off =
+ lkup_exts->fv_words[j].off;
+ grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
+ grp->n_val_pairs++;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
+ * @hw: pointer to the hardware structure
+ * @fv_list: field vector with the extraction sequence information
+ * @rg_list: recipe groupings with protocol-offset pairs
+ *
+ * Helper function to fill in the field vector indices for protocol-offset
+ * pairs. These indexes are then ultimately programmed into a recipe.
+ */
+static enum ice_status
+ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
+ struct list_head *rg_list)
+{
+ struct ice_sw_fv_list_entry *fv;
+ struct ice_recp_grp_entry *rg;
+ struct ice_fv_word *fv_ext;
+
+ if (list_empty(fv_list))
+ return 0;
+
+ fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ list_entry);
+ fv_ext = fv->fv_ptr->ew;
+
+ list_for_each_entry(rg, rg_list, l_entry) {
+ u8 i;
+
+ for (i = 0; i < rg->r_group.n_val_pairs; i++) {
+ struct ice_fv_word *pr;
+ bool found = false;
+ u16 mask;
+ u8 j;
+
+ pr = &rg->r_group.pairs[i];
+ mask = rg->r_group.mask[i];
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv_ext[j].prot_id == pr->prot_id &&
+ fv_ext[j].off == pr->off) {
+ found = true;
+
+ /* Store index of field vector */
+ rg->fv_idx[i] = j;
+ rg->fv_mask[i] = mask;
+ break;
+ }
+
+ /* Protocol/offset could not be found, caller gave an
+ * invalid pair
+ */
+ if (!found)
+ return ICE_ERR_PARAM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_find_free_recp_res_idx - find free result indexes for recipe
+ * @hw: pointer to hardware structure
+ * @profiles: bitmap of profiles that will be associated with the new recipe
+ * @free_idx: pointer to variable to receive the free index bitmap
+ *
+ * The algorithm used here is:
+ * 1. When creating a new recipe, create a set P which contains all
+ * Profiles that will be associated with our new recipe
+ *
+ * 2. For each Profile p in set P:
+ * a. Add all recipes associated with Profile p into set R
+ * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
+ * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
+ * i. Or just assume they all have the same possible indexes:
+ * 44, 45, 46, 47
+ * i.e., PossibleIndexes = 0x0000F00000000000
+ *
+ * 3. For each Recipe r in set R:
+ * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
+ * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
+ *
+ * FreeIndexes will contain the bits indicating the indexes free for use,
+ * then the code needs to update the recipe[r].used_result_idx_bits to
+ * indicate which indexes were selected for use by this recipe.
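+ *
+ * For example (using the illustrative numbers above): if PossibleIndexes is
+ * 0x0000F00000000000 (indexes 44-47) and one colliding recipe already uses
+ * index 44 (UsedIndexes = 0x0000100000000000), then
+ * FreeIndexes = 0x0000E00000000000, i.e. indexes 45, 46 and 47 remain free.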
+ */
+static u16
+ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
+ unsigned long *free_idx)
+{
+ DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
+ DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
+ DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
+ u16 bit;
+
+ bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
+ bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
+
+ bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+
+ /* For each profile we are going to associate the recipe with, add the
+ * recipes that are associated with that profile. This will give us
+ * the set of recipes that our recipe may collide with. Also, determine
+ * what possible result indexes are usable given this set of profiles.
+ */
+ for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
+ bitmap_or(recipes, recipes, profile_to_recipe[bit],
+ ICE_MAX_NUM_RECIPES);
+ bitmap_and(possible_idx, possible_idx,
+ hw->switch_info->prof_res_bm[bit],
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* For each recipe that our new recipe may collide with, determine
+ * which indexes have been used.
+ */
+ for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
+ bitmap_or(used_idx, used_idx,
+ hw->switch_info->recp_list[bit].res_idxs,
+ ICE_MAX_FV_WORDS);
+
+ bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
+
+ /* return number of free indexes */
+ return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
+}
+
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @match_tun_mask: tunnel mask that needs to be programmed
+ * @profiles: bitmap of profiles that will be associated.
+ */
+static enum ice_status
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ u16 match_tun_mask, unsigned long *profiles)
+{
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ struct ice_aqc_recipe_data_elem *buf;
+ struct ice_recp_grp_entry *entry;
+ enum ice_status status;
+ u16 free_res_idx;
+ u16 recipe_count;
+ u8 chain_idx;
+ u8 recps = 0;
+
+ /* When more than one recipe is required, another recipe is needed to
+ * chain them together. Matching a tunnel metadata ID takes up one of
+ * the match fields in the chaining recipe, reducing the number of
+ * chained recipes by one.
+ */
+ /* check number of free result indices */
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, rm->n_grp_count);
+
+ if (rm->n_grp_count > 1) {
+ if (rm->n_grp_count > free_res_idx)
+ return ICE_ERR_MAX_LIMIT;
+
+ rm->n_grp_count++;
+ }
+
+ if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
+ return ICE_ERR_MAX_LIMIT;
+
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
+ GFP_KERNEL);
+ if (!buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_mem;
+ }
+
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
+ recipe_count = ICE_MAX_NUM_RECIPES;
+ status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
+ NULL);
+ if (status || recipe_count == 0)
+ goto err_unroll;
+
+ /* Allocate the recipe resources, and configure them according to the
+ * match fields from protocol headers and extracted field vectors.
+ */
+ chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ u8 i;
+
+ status = ice_alloc_recipe(hw, &entry->rid);
+ if (status)
+ goto err_unroll;
+
+ /* Clear the result index of the located recipe, as this will be
+ * updated, if needed, later in the recipe creation process.
+ */
+ tmp[0].content.result_indx = 0;
+
+ buf[recps] = tmp[0];
+ buf[recps].recipe_indx = (u8)entry->rid;
+ /* If the recipe is a non-root recipe, RID should be programmed
+ * as 0 for the rules to be applied correctly.
+ */
+ buf[recps].content.rid = 0;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ /* Set up lkup_indx 1..4 to INVALID/ignore and set the mask
+ * to be 0
+ */
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] = 0x80;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ for (i = 0; i < entry->r_group.n_val_pairs; i++) {
+ buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
+ buf[recps].content.mask[i + 1] =
+ cpu_to_le16(entry->fv_mask[i]);
+ }
+
+ if (rm->n_grp_count > 1) {
+ /* Check to see if there really is a valid result index
+ * that can be used.
+ */
+ if (chain_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ status = ICE_ERR_MAX_LIMIT;
+ goto err_unroll;
+ }
+
+ entry->chain_idx = chain_idx;
+ buf[recps].content.result_indx =
+ ICE_AQ_RECIPE_RESULT_EN |
+ ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
+ ICE_AQ_RECIPE_RESULT_DATA_M);
+ clear_bit(chain_idx, result_idx_bm);
+ chain_idx = find_first_bit(result_idx_bm,
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* fill recipe dependencies */
+ bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ set_bit(buf[recps].recipe_indx,
+ (unsigned long *)buf[recps].recipe_bitmap);
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ recps++;
+ }
+
+ if (rm->n_grp_count == 1) {
+ rm->root_rid = buf[0].recipe_indx;
+ set_bit(buf[0].recipe_indx, rm->r_bitmap);
+ buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
+ if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
+ memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[0].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+ /* Applicable only for a ROOT_RECIPE: set the fwd_priority of
+ * the recipe being created if the user specified one. Any
+ * advanced switch filter that results in a new extraction
+ * sequence usually ends up creating a new recipe of type ROOT,
+ * and recipes are usually associated with profiles. A switch
+ * rule referring to the newly created recipe needs either a
+ * 'fwd' or a 'join' priority, otherwise switch rule evaluation
+ * will not happen correctly. In other words, if a switch rule
+ * is to be evaluated on a priority basis, the recipe needs a
+ * priority; otherwise it will be evaluated last.
+ */
+ buf[0].content.act_ctrl_fwd_priority = rm->priority;
+ } else {
+ struct ice_recp_grp_entry *last_chain_entry;
+ u16 rid, i;
+
+ /* Allocate the last recipe that will chain the outcomes of the
+ * other recipes together
+ */
+ status = ice_alloc_recipe(hw, &rid);
+ if (status)
+ goto err_unroll;
+
+ buf[recps].recipe_indx = (u8)rid;
+ buf[recps].content.rid = (u8)rid;
+ buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
+ /* The new entry created should also be part of rg_list to
+ * make sure we have a complete recipe.
+ */
+ last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*last_chain_entry),
+ GFP_KERNEL);
+ if (!last_chain_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+ last_chain_entry->rid = rid;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] =
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ i = 1;
+ /* update r_bitmap with the recp that is used for chaining */
+ set_bit(rid, rm->r_bitmap);
+ /* This is the recipe that chains all the other recipes, so it
+ * should not itself have a chaining ID.
+ */
+ last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ last_chain_entry->fv_idx[i] = entry->chain_idx;
+ buf[recps].content.lkup_indx[i] = entry->chain_idx;
+ buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+ set_bit(entry->rid, rm->r_bitmap);
+ }
+ list_add(&last_chain_entry->l_entry, &rm->rg_list);
+ if (sizeof(buf[recps].recipe_bitmap) >=
+ sizeof(rm->r_bitmap)) {
+ memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[recps].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+
+ /* To differentiate among different UDP tunnels, a metadata ID
+ * flag is used.
+ */
+ if (match_tun_mask) {
+ buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
+ buf[recps].content.mask[i] =
+ cpu_to_le16(match_tun_mask);
+ }
+
+ recps++;
+ rm->root_rid = (u8)rid;
+ }
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ goto err_unroll;
+
+ /* Add every recipe that was just created to the recipe
+ * bookkeeping list.
+ */
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ struct ice_switch_info *sw = hw->switch_info;
+ bool is_root, idx_found = false;
+ struct ice_sw_recipe *recp;
+ u16 idx, buf_idx = 0;
+
+ /* find buffer index for copying some data */
+ for (idx = 0; idx < rm->n_grp_count; idx++)
+ if (buf[idx].recipe_indx == entry->rid) {
+ buf_idx = idx;
+ idx_found = true;
+ }
+
+ if (!idx_found) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto err_unroll;
+ }
+
+ recp = &sw->recp_list[entry->rid];
+ is_root = (rm->root_rid == entry->rid);
+ recp->is_root = is_root;
+
+ recp->root_rid = entry->rid;
+ recp->big_recp = (is_root && rm->n_grp_count > 1);
+
+ memcpy(&recp->ext_words, entry->r_group.pairs,
+ entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+
+ memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
+ sizeof(recp->r_bitmap));
+
+ /* Copy non-result fv index values and masks to recipe. This
+ * call will also update the result recipe bitmask.
+ */
+ ice_collect_result_idx(&buf[buf_idx], recp);
+
+ /* for non-root recipes, also copy to the root, this allows
+ * easier matching of a complete chained recipe
+ */
+ if (!is_root)
+ ice_collect_result_idx(&buf[buf_idx],
+ &sw->recp_list[rm->root_rid]);
+
+ recp->n_ext_words = entry->r_group.n_val_pairs;
+ recp->chain_idx = entry->chain_idx;
+ recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
+ recp->n_grp_count = rm->n_grp_count;
+ recp->recp_created = true;
+ }
+ rm->root_buf = buf;
+ kfree(tmp);
+ return status;
+
+err_unroll:
+err_mem:
+ kfree(tmp);
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
+/**
+ * ice_create_recipe_group - creates recipe group
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @lkup_exts: lookup elements
+ */
+static enum ice_status
+ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ enum ice_status status;
+ u8 recp_count = 0;
+
+ rm->n_grp_count = 0;
+
+ /* Create recipes for words that are marked not done by packing them
+ * as best fit.
+ */
+ status = ice_create_first_fit_recp_def(hw, lkup_exts,
+ &rm->rg_list, &recp_count);
+ if (!status) {
+ rm->n_grp_count += recp_count;
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(&rm->ext_words, lkup_exts->fv_words,
+ sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask,
+ sizeof(rm->word_masks));
+ }
+
+ return status;
+}
+
+/**
+ * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: pointer to a list that holds the returned field vectors
+ */
+static enum ice_status
+ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ enum ice_status status;
+ u8 *prot_ids;
+ u16 i;
+
+ prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
+ if (!prot_ids)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < lkups_cnt; i++)
+ if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
+ status = ICE_ERR_CFG;
+ goto free_mem;
+ }
+
+ /* Find field vectors that include all specified protocol types */
+ status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+
+free_mem:
+ kfree(prot_ids);
+ return status;
+}
+
+/**
+ * ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
+ * @hw: pointer to hardware structure
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+static void
+ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
+ unsigned long *bm)
+{
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+
+ ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);
+}
+
+/**
+ * ice_add_adv_recipe - Add an advanced recipe that is not part of the default recipes
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @rid: return the recipe ID of the recipe created
+ */
+static enum ice_status
+ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
+{
+ DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
+ DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
+ struct ice_prot_lkup_ext *lkup_exts;
+ struct ice_recp_grp_entry *r_entry;
+ struct ice_sw_fv_list_entry *fvit;
+ struct ice_recp_grp_entry *r_tmp;
+ struct ice_sw_fv_list_entry *tmp;
+ enum ice_status status = 0;
+ struct ice_sw_recipe *rm;
+ u16 match_tun_mask = 0;
+ u8 i;
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
+ if (!lkup_exts)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Determine the number of words to be matched and whether it exceeds
+ * a recipe's restrictions
+ */
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+
+ count = ice_fill_valid_words(&lkups[i], lkup_exts);
+ if (!count) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+ }
+
+ rm = kzalloc(sizeof(*rm), GFP_KERNEL);
+ if (!rm) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_free_lkup_exts;
+ }
+
+ /* Get field vectors that contain fields extracted from all the protocol
+ * headers being programmed.
+ */
+ INIT_LIST_HEAD(&rm->fv_list);
+ INIT_LIST_HEAD(&rm->rg_list);
+
+ /* Get bitmap of field vectors (profiles) that are compatible with the
+ * rule request; only these will be searched in the subsequent call to
+ * ice_get_fv.
+ */
+ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
+
+ status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ if (status)
+ goto err_unroll;
+
+ /* Group match words into recipes using preferred recipe grouping
+ * criteria.
+ */
+ status = ice_create_recipe_group(hw, rm, lkup_exts);
+ if (status)
+ goto err_unroll;
+
+ /* set the recipe priority if specified */
+ rm->priority = (u8)rinfo->priority;
+
+ /* Find offsets from the field vector. Pick the first one for all the
+ * recipes.
+ */
+ status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ if (status)
+ goto err_unroll;
+
+ /* get bitmap of all profiles the recipe will be associated with */
+ bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
+ set_bit((u16)fvit->profile_id, profiles);
+ }
+
+ /* Look for a recipe which matches our requested fv / mask list */
+ *rid = ice_find_recp(hw, lkup_exts);
+ if (*rid < ICE_MAX_NUM_RECIPES)
+ /* Success if we found a recipe that matches the existing criteria */
+ goto err_unroll;
+
+ /* Recipe we need does not exist, add a recipe */
+ status = ice_add_sw_recipe(hw, rm, match_tun_mask, profiles);
+ if (status)
+ goto err_unroll;
+
+ /* Associate all the recipes created with all the profiles in the
+ * common field vector.
+ */
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 j;
+
+ status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap, NULL);
+ if (status)
+ goto err_unroll;
+
+ bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap,
+ NULL);
+ ice_release_change_lock(hw);
+
+ if (status)
+ goto err_unroll;
+
+ /* Update profile to recipe bitmap array */
+ bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+
+ /* Update recipe to profile bitmap array */
+ for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
+ }
+
+ *rid = rm->root_rid;
+ memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
+ sizeof(*lkup_exts));
+err_unroll:
+ list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
+ list_del(&r_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r_entry);
+ }
+
+ list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
+ list_del(&fvit->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvit);
+ }
+
+ if (rm->root_buf)
+ devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
+
+ kfree(rm);
+
+err_free_lkup_exts:
+ kfree(lkup_exts);
+
+ return status;
+}
+
+/**
+ * ice_find_dummy_packet - find dummy packet
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: pointer to receive the pointer to the offsets for the packet
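+ *
+ * Selection summary: a UDP lookup selects a UDP dummy packet (TCP is the
+ * default), an IPv6 lookup (or an IPv6 ethertype match) selects the IPv6
+ * variant, and a VLAN lookup selects the VLAN-tagged variant of each.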
+ */
+static void
+ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ const u8 **pkt, u16 *pkt_len,
+ const struct ice_dummy_pkt_offsets **offsets)
+{
+ bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ u16 i;
+
+ for (i = 0; i < lkups_cnt; i++) {
+ if (lkups[i].type == ICE_UDP_ILOS)
+ udp = true;
+ else if (lkups[i].type == ICE_TCP_IL)
+ tcp = true;
+ else if (lkups[i].type == ICE_IPV6_OFOS)
+ ipv6 = true;
+ else if (lkups[i].type == ICE_VLAN_OFOS)
+ vlan = true;
+ else if (lkups[i].type == ICE_ETYPE_OL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ ipv6 = true;
+ }
+
+ if (udp && !ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_packet);
+ *offsets = dummy_vlan_udp_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_packet;
+ *pkt_len = sizeof(dummy_udp_packet);
+ *offsets = dummy_udp_packet_offsets;
+ return;
+ } else if (udp && ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
+ *offsets = dummy_vlan_udp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_udp_ipv6_packet);
+ *offsets = dummy_udp_ipv6_packet_offsets;
+ return;
+ } else if ((tcp && ipv6) || ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
+ *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_tcp_ipv6_packet);
+ *offsets = dummy_tcp_ipv6_packet_offsets;
+ return;
+ }
+
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_packet);
+ *offsets = dummy_vlan_tcp_packet_offsets;
+ } else {
+ *pkt = dummy_tcp_packet;
+ *pkt_len = sizeof(dummy_tcp_packet);
+ *offsets = dummy_tcp_packet_offsets;
+ }
+}
+
+/**
+ * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @s_rule: stores rule information from the match criteria
+ * @dummy_pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: offset info for the dummy packet
+ */
+static enum ice_status
+ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ struct ice_aqc_sw_rules_elem *s_rule,
+ const u8 *dummy_pkt, u16 pkt_len,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u8 *pkt;
+ u16 i;
+
+ /* Start with a packet with a pre-defined/dummy content. Then, fill
+ * in the header values to be looked up or matched.
+ */
+ pkt = s_rule->pdata.lkup_tx_rx.hdr;
+
+ memcpy(pkt, dummy_pkt, pkt_len);
+
+ for (i = 0; i < lkups_cnt; i++) {
+ enum ice_protocol_type type;
+ u16 offset = 0, len = 0, j;
+ bool found = false;
+
+ /* find the start of this layer; it should be found since this
+ * was already checked when searching for the dummy packet
+ */
+ type = lkups[i].type;
+ for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
+ if (type == offsets[j].type) {
+ offset = offsets[j].offset;
+ found = true;
+ break;
+ }
+ }
+ /* this should never happen in a correct calling sequence */
+ if (!found)
+ return ICE_ERR_PARAM;
+
+ switch (lkups[i].type) {
+ case ICE_MAC_OFOS:
+ case ICE_MAC_IL:
+ len = sizeof(struct ice_ether_hdr);
+ break;
+ case ICE_ETYPE_OL:
+ len = sizeof(struct ice_ethtype_hdr);
+ break;
+ case ICE_VLAN_OFOS:
+ len = sizeof(struct ice_vlan_hdr);
+ break;
+ case ICE_IPV4_OFOS:
+ case ICE_IPV4_IL:
+ len = sizeof(struct ice_ipv4_hdr);
+ break;
+ case ICE_IPV6_OFOS:
+ case ICE_IPV6_IL:
+ len = sizeof(struct ice_ipv6_hdr);
+ break;
+ case ICE_TCP_IL:
+ case ICE_UDP_OF:
+ case ICE_UDP_ILOS:
+ len = sizeof(struct ice_l4_hdr);
+ break;
+ case ICE_SCTP_IL:
+ len = sizeof(struct ice_sctp_hdr);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ /* the length should be a word multiple */
+ if (len % ICE_BYTES_PER_WORD)
+ return ICE_ERR_CFG;
+
+ /* We have the offset to the header start, the length, the
+ * caller's header values and mask. Use this information to
+ * copy the data into the dummy packet appropriately based on
+ * the mask. Note that we need to only write the bits as
+ * indicated by the mask to make sure we don't improperly write
+ * over any significant packet data.
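+ * In effect, for every 16-bit word: new = (old & ~mask) | (value & mask).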
+ */
+ for (j = 0; j < len / sizeof(u16); j++)
+ if (((u16 *)&lkups[i].m_u)[j])
+ ((u16 *)(pkt + offset))[j] =
+ (((u16 *)(pkt + offset))[j] &
+ ~((u16 *)&lkups[i].m_u)[j]) |
+ (((u16 *)&lkups[i].h_u)[j] &
+ ((u16 *)&lkups[i].m_u)[j]);
+ }
+
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+
+ return 0;
+}
+
+/**
+ * ice_find_adv_rule_entry - Search a rule entry
+ * @hw: pointer to the hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @recp_id: recipe ID for which we are finding the rule
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ *
+ * Helper function to search for a given advanced rule entry.
+ * Returns a pointer to the entry storing the rule if found.
+ */
+static struct ice_adv_fltr_mgmt_list_entry *
+ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, u16 recp_id,
+ struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_switch_info *sw = hw->switch_info;
+ int i;
+
+ list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
+ list_entry) {
+ bool lkups_matched = true;
+
+ if (lkups_cnt != list_itr->lkups_cnt)
+ continue;
+ for (i = 0; i < list_itr->lkups_cnt; i++)
+ if (memcmp(&list_itr->lkups[i], &lkups[i],
+ sizeof(*lkups))) {
+ lkups_matched = false;
+ break;
+ }
+ if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
+ lkups_matched)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
+ * ice_adv_add_update_vsi_list - add or update VSI list for an advanced rule
+ * @hw: pointer to the hardware structure
+ * @m_entry: pointer to current adv filter management list entry
+ * @cur_fltr: filter information from the book keeping entry
+ * @new_fltr: filter information with the new VSI to be added
+ *
+ * Call AQ command to add or update previously created VSI list with new VSI.
+ *
+ * Helper function to do the bookkeeping associated with adding filter
+ * information. The bookkeeping algorithm is described below:
+ * When a VSI needs to subscribe to a given advanced filter
+ * if only one VSI has been added till now
+ * Allocate a new VSI list and add two VSIs
+ * to this list using switch rule command
+ * Update the previously created switch rule with the
+ * newly created VSI list ID
+ * if a VSI list was previously created
+ * Add the new VSI to the previously created VSI list set
+ * using the update switch rule command
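+ *
+ * For example (illustrative handles): if VSI 3 already subscribes to a rule
+ * and VSI 5 now subscribes as well, a two-entry VSI list {3, 5} is created
+ * and the rule's action changes from FWD_TO_VSI to FWD_TO_VSI_LIST; a later
+ * subscriber, say VSI 7, is simply added to the existing list.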
+ */
+static enum ice_status
+ice_adv_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_adv_fltr_mgmt_list_entry *m_entry,
+ struct ice_adv_rule_info *cur_fltr,
+ struct ice_adv_rule_info *new_fltr)
+{
+ enum ice_status status;
+ u16 vsi_list_id = 0;
+
+ if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
+ return ICE_ERR_NOT_IMPL;
+
+ if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
+ (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
+ return ICE_ERR_NOT_IMPL;
+
+ if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
+ /* Only one entry existed in the mapping and it was not already
+ * a part of a VSI list. So, create a VSI list with the old and
+ * new VSIs.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
+ new_fltr->sw_act.fwd_id.hw_vsi_id)
+ return ICE_ERR_ALREADY_EXISTS;
+
+ vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
+ vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id,
+ ICE_SW_LKUP_LAST);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
+
+ /* Update the previous switch rule of "forward to VSI" to
+ * "fwd to VSI list"
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ return status;
+
+ cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
+ cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
+ m_entry->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ } else {
+ u16 vsi_handle = new_fltr->sw_act.vsi_handle;
+
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+ return 0;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+ */
+ vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
+
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false,
+ ice_aqc_opc_update_sw_rules,
+ ICE_SW_LKUP_LAST);
+ /* update VSI list mapping info with new VSI ID */
+ if (!status)
+ set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
+ }
+ if (!status)
+ m_entry->vsi_count++;
+ return status;
+}
+
+/**
+ * ice_add_adv_rule - helper function to create an advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that need to be looked up. All words
+ * together make one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: other information related to the rule that needs to be programmed
+ * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
+ * ignored in case of error.
+ *
+ * This function can program only 1 rule at a time. The lkups is used to
+ * describe all the words that form the "lookup" portion of the recipe.
+ * These words can span multiple protocols. Callers to this function need to
+ * pass in a list of protocol headers with lookup information along with a
+ * mask that determines which words are valid from the given protocol header.
+ * rinfo describes other information related to this rule such as forwarding
+ * IDs, priority of this rule, etc.
+ */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
+ u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
+ const struct ice_dummy_pkt_offsets *pkt_offsets;
+ struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ enum ice_status status;
+ const u8 *pkt = NULL;
+ u16 word_cnt;
+ u32 act = 0;
+ u8 q_rgn;
+
+ /* Initialize profile to result index bitmap */
+ if (!hw->switch_info->prof_res_bm_init) {
+ hw->switch_info->prof_res_bm_init = 1;
+ ice_init_prof_result_bm(hw);
+ }
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ /* get # of words we need to match */
+ word_cnt = 0;
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 j, *ptr;
+
+ ptr = (u16 *)&lkups[i].m_u;
+ for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
+ if (ptr[j] != 0)
+ word_cnt++;
+ }
+
+ if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
+ return ICE_ERR_PARAM;
+
+ /* make sure that we can locate a dummy packet */
+ ice_find_dummy_packet(lkups, lkups_cnt, &pkt, &pkt_len,
+ &pkt_offsets);
+ if (!pkt) {
+ status = ICE_ERR_PARAM;
+ goto err_ice_add_adv_rule;
+ }
+
+ if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
+ return ICE_ERR_CFG;
+
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ rinfo->sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (rinfo->sw_act.flag & ICE_FLTR_TX)
+ rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
+ if (status)
+ return status;
+ m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ if (m_entry) {
+ /* The rule already exists; subscribe the new VSI to it. If
+ * the rule is not yet using a VSI list, create one holding
+ * the existing VSI and the new VSI; otherwise just add the
+ * new VSI to the existing list and update vsi_count
+ * accordingly.
+ */
+ status = ice_adv_add_update_vsi_list(hw, m_entry,
+ &m_entry->rule_info,
+ rinfo);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+ return status;
+ }
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
+
+ switch (rinfo->sw_act.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
+ ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_Q:
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ break;
+ case ICE_FWD_TO_QGRP:
+ q_rgn = rinfo->sw_act.qgrp_size > 0 ?
+ (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
+ ICE_SINGLE_ACT_Q_REGION_M;
+ break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ default:
+ status = ICE_ERR_CFG;
+ goto err_ice_add_adv_rule;
+ }
+
+ /* Set the rule LOOKUP type based on the caller-specified 'rx'
+ * flag instead of hardcoding it to LOOKUP_TX or LOOKUP_RX.
+ *
+ * for 'Rx' set the source to be the port number
+ * for 'Tx' set the source to be the source HW VSI number (determined
+ * by caller)
+ */
+ if (rinfo->rx) {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+ }
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
+ pkt_len, pkt_offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
+ NULL);
+ if (status)
+ goto err_ice_add_adv_rule;
+ adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(struct ice_adv_fltr_mgmt_list_entry),
+ GFP_KERNEL);
+ if (!adv_fltr) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
+ lkups_cnt * sizeof(*lkups), GFP_KERNEL);
+ if (!adv_fltr->lkups) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups_cnt = lkups_cnt;
+ adv_fltr->rule_info = *rinfo;
+ adv_fltr->rule_info.fltr_rule_id =
+ le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ sw = hw->switch_info;
+ sw->recp_list[rid].adv_rule = true;
+ rule_head = &sw->recp_list[rid].filt_rules;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ adv_fltr->vsi_count = 1;
+
+ /* Add rule entry to bookkeeping list */
+ list_add(&adv_fltr->list_entry, rule_head);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+err_ice_add_adv_rule:
+ if (status && adv_fltr) {
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr);
+ }
+
+ kfree(s_rule);
+
+ return status;
+}
+
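+/* Illustrative usage sketch, not part of this patch. The field names for
+ * the TCP lookup (l4_hdr.dst_port) are assumed from the ice_prot_hdr union,
+ * and vsi_handle is a placeholder. A caller forwarding TCP traffic with
+ * destination port 80 to a VSI could do roughly:
+ *
+ *	struct ice_adv_lkup_elem lkups[1] = {};
+ *	struct ice_adv_rule_info rinfo = {};
+ *	struct ice_rule_query_data added;
+ *	enum ice_status status;
+ *
+ *	lkups[0].type = ICE_TCP_IL;
+ *	lkups[0].h_u.l4_hdr.dst_port = cpu_to_be16(80);
+ *	lkups[0].m_u.l4_hdr.dst_port = cpu_to_be16(0xFFFF);
+ *
+ *	rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ *	rinfo.sw_act.vsi_handle = vsi_handle;
+ *	rinfo.rx = true;
+ *
+ *	status = ice_add_adv_rule(hw, lkups, 1, &rinfo, &added);
+ */
+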
/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
@@ -2831,6 +5070,229 @@ end:
}
/**
+ * ice_adv_rem_update_vsi_list - remove VSI from the VSI list of an advanced rule
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_adv_fltr_mgmt_list_entry *fm_list)
+{
+ struct ice_vsi_list_map_info *vsi_list_info;
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status;
+ u16 vsi_list_id;
+
+ if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* The VSI being removed is not subscribed to this rule */
+ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = ICE_SW_LKUP_LAST;
+ vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
+ vsi_list_info = fm_list->vsi_list_info;
+ if (fm_list->vsi_count == 1) {
+ struct ice_fltr_info tmp_fltr;
+ u16 rem_vsi_handle;
+
+ rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
+ fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
+
+ /* Update the previous switch rule of "MAC forward to VSI" to
+ * "MAC fwd to VSI list"
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+ fm_list->vsi_list_info->ref_cnt--;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
+ return status;
+ }
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule - removes existing advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that need to be looked up. All words
+ * together make one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: pointer to the rule information for the rule
+ *
+ * This function can be used to remove 1 rule at a time. The lkups is
+ * used to describe all the words that form the "lookup" portion of the
+ * rule. These words can span multiple protocols. Callers to this function
+ * need to pass in a list of protocol headers with lookup information along
+ * with a mask that determines which words are valid from the given protocol
+ * header. rinfo describes other information related to this rule such as
+ * forwarding IDs, priority of this rule, etc.
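+ *
+ * Note (summarizing the flow below): the lookups are used to recompute the
+ * extraction words and locate the recipe via ice_find_recp(), so they must
+ * describe the same words as when the rule was added.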
+ */
+static enum ice_status
+ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_elem;
+ struct ice_prot_lkup_ext lkup_exts;
+ enum ice_status status = 0;
+ bool remove_rule = false;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 i, rid, vsi_handle;
+
+ memset(&lkup_exts, 0, sizeof(lkup_exts));
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST)
+ return ICE_ERR_CFG;
+
+ count = ice_fill_valid_words(&lkups[i], &lkup_exts);
+ if (!count)
+ return ICE_ERR_CFG;
+ }
+
+ rid = ice_find_recp(hw, &lkup_exts);
+ /* We did not find a recipe that matches the existing criteria */
+ if (rid == ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
+ list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ /* the rule is already removed */
+ if (!list_elem)
+ return 0;
+ mutex_lock(rule_lock);
+ if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else if (list_elem->vsi_count > 1) {
+ remove_rule = false;
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ } else {
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ if (status) {
+ mutex_unlock(rule_lock);
+ return status;
+ }
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+ mutex_unlock(rule_lock);
+ if (remove_rule) {
+ struct ice_aqc_sw_rules_elem *s_rule;
+ u16 rule_buf_sz;
+
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ s_rule->pdata.lkup_tx_rx.act = 0;
+ s_rule->pdata.lkup_tx_rx.index =
+ cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+ s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ struct ice_switch_info *sw = hw->switch_info;
+
+ mutex_lock(rule_lock);
+ list_del(&list_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
+ devm_kfree(ice_hw_to_dev(hw), list_elem);
+ mutex_unlock(rule_lock);
+ if (list_empty(&sw->recp_list[rid].filt_rules))
+ sw->recp_list[rid].adv_rule = false;
+ }
+ kfree(s_rule);
+ }
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
+ * @hw: pointer to the hardware structure
+ * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
+ *
+ * This function is used to remove 1 rule at a time. The removal is based on
+ * the remove_entry parameter. This function will remove the rule for a given
+ * vsi_handle with a given rule_id passed as a parameter in remove_entry.
+ */
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+ struct ice_adv_rule_info rinfo;
+ struct ice_switch_info *sw;
+
+ sw = hw->switch_info;
+ if (!sw->recp_list[remove_entry->rid].recp_created)
+ return ICE_ERR_PARAM;
+ list_head = &sw->recp_list[remove_entry->rid].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (list_itr->rule_info.fltr_rule_id ==
+ remove_entry->rule_id) {
+ rinfo = list_itr->rule_info;
+ rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
+ return ice_rem_adv_rule(hw, list_itr->lkups,
+ list_itr->lkups_cnt, &rinfo);
+ }
+ }
+ /* either list is empty or unable to find rule */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
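+/* Illustrative pairing with ice_add_adv_rule() (error handling elided): the
+ * ice_rule_query_data filled in via @added_entry can be passed back verbatim:
+ *
+ *	struct ice_rule_query_data added;
+ *
+ *	status = ice_add_adv_rule(hw, lkups, lkups_cnt, &rinfo, &added);
+ *	...
+ *	status = ice_rem_adv_rule_by_id(hw, &added);
+ */
+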
+/**
* ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
@@ -2868,12 +5330,15 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
if (!sw)
return;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
struct list_head *l_head;
l_head = &sw->recp_list[i].filt_replay_rules;
- ice_rem_sw_rule_info(hw, l_head);
+ if (!sw->recp_list[i].adv_rule)
+ ice_rem_sw_rule_info(hw, l_head);
+ else
+ ice_rem_adv_rule_info(hw, l_head);
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c5db8d56133f..c4dd2062c469 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,6 +14,9 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
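+
+/* Size of a switch rule element without any dummy packet header data */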
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
+
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
@@ -122,30 +125,121 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_adv_lkup_elem {
+ enum ice_protocol_type type;
+ union ice_prot_hdr h_u; /* Header values */
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+};
+
+struct ice_sw_act_ctrl {
+ /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
+ u16 src;
+ u16 flag;
+ enum ice_sw_fwd_act_type fltr_act;
+ /* Depending on filter action */
+ union {
+ /* This is a queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
+ */
+ u16 q_id:11;
+ u16 vsi_id:10;
+ u16 hw_vsi_id:10;
+ u16 vsi_list_id:10;
+ } fwd_id;
+ /* software VSI handle */
+ u16 vsi_handle;
+ u8 qgrp_size;
+};
+
+struct ice_rule_query_data {
+ /* Recipe ID for which the requested rule was added */
+ u16 rid;
+ /* Rule ID that was added or is supposed to be removed */
+ u16 rule_id;
+ /* vsi_handle for which Rule was added or is supposed to be removed */
+ u16 vsi_handle;
+};
+
+/* This structure allows passing info about the lb_en and lan_en
+ * flags to ice_add_adv_rule. Values in act are used only if
+ * act_valid is set to true; otherwise default values are used.
+ */
+struct ice_adv_rule_flags_info {
+ u32 act;
+ u8 act_valid; /* indicate if flags in act are valid */
+};
+
+struct ice_adv_rule_info {
+ struct ice_sw_act_ctrl sw_act;
+ u32 priority;
+ u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
+ u16 fltr_rule_id;
+ struct ice_adv_rule_flags_info flags_info;
+};
+
+/* A collection of one or more four-word recipes */
struct ice_sw_recipe {
- struct list_head l_entry;
+ /* For a chained recipe the root recipe is what should be used for
+ * programming rules
+ */
+ u8 is_root;
+ u8 root_rid;
+ u8 recp_created;
+
+ /* Number of extraction words */
+ u8 n_ext_words;
+ /* Protocol ID and Offset pair (extraction word) to describe the
+ * recipe
+ */
+ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
+ u16 word_masks[ICE_MAX_CHAIN_WORDS];
+
+ /* if this recipe is a collection of other recipes */
+ u8 big_recp;
- /* To protect modification of filt_rule list
- * defined below
+ /* if this recipe is part of another bigger recipe, then the chain
+ * index corresponding to this recipe
*/
- struct mutex filt_rule_lock;
+ u8 chain_idx;
+
+ /* if this recipe is a collection of other recipes, then the count of
+ * those other recipes and their recipe IDs
+ */
+ u8 n_grp_count;
+
+ /* Bit map specifying the IDs associated with this group of recipes */
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
- /* List of type ice_fltr_mgmt_list_entry */
+ /* List of type ice_fltr_mgmt_list_entry or adv_rule */
+ u8 adv_rule;
struct list_head filt_rules;
struct list_head filt_replay_rules;
- /* linked list of type recipe_list_entry */
- struct list_head rg_list;
- /* linked list of type ice_sw_fv_list_entry*/
+ struct mutex filt_rule_lock; /* protect filter rule structure */
+
+ /* Profiles this recipe should be associated with */
struct list_head fv_list;
- struct ice_aqc_recipe_data_elem *r_buf;
- u8 recp_count;
- u8 root_rid;
- u8 num_profs;
- u8 *prof_ids;
- /* recipe bitmap: what all recipes makes this recipe */
- DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ /* Profiles this recipe is associated with */
+ u8 num_profs, *prof_ids;
+
+ /* Bit map for possible result indexes */
+ DECLARE_BITMAP(res_idxs, ICE_MAX_FV_WORDS);
+
+ /* This allows the user to specify the recipe priority.
+ * For now, this becomes 'fwd_priority' when the recipe
+ * is created; recipes can usually have 'fwd' and 'join'
+ * priority.
+ */
+ u8 priority;
+
+ struct list_head rg_list;
+
+ /* AQ buffer associated with this recipe */
+ struct ice_aqc_recipe_data_elem *root_buf;
+ /* This struct saves the fv_words for a given lookup */
+ struct ice_prot_lkup_ext lkup_exts;
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
@@ -183,6 +277,16 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+struct ice_adv_fltr_mgmt_list_entry {
+ struct list_head list_entry;
+
+ struct ice_adv_lkup_elem *lkups;
+ struct ice_adv_rule_info rule_info;
+ u16 lkups_cnt;
+ struct ice_vsi_list_map_info *vsi_list_info;
+ u16 vsi_count;
+};
+
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0x1,
ICE_PROMISC_UCAST_TX = 0x2,
@@ -218,6 +322,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
@@ -227,6 +335,8 @@ enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
@@ -245,10 +355,19 @@ enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
+enum ice_status
+ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry);
+
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+enum ice_status
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
+ u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
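For orientation, the advanced-rule API exported above pairs an array of ice_adv_lkup_elem match elements with an ice_adv_rule_info action description, and returns an ice_rule_query_data handle used for later removal. A minimal caller sketch (not part of the patch; hw, vsi_handle and dst_mac are assumed to come from the driver context):

    /* hedged sketch: match a destination MAC and forward to a VSI */
    struct ice_adv_lkup_elem lkup = { .type = ICE_MAC_OFOS };
    struct ice_adv_rule_info rinfo = { 0 };
    struct ice_rule_query_data added;
    enum ice_status status;

    ether_addr_copy(lkup.h_u.eth_hdr.dst_addr, dst_mac);
    eth_broadcast_addr(lkup.m_u.eth_hdr.dst_addr); /* match all 48 bits */

    rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
    rinfo.sw_act.vsi_handle = vsi_handle;
    rinfo.rx = true;

    status = ice_add_adv_rule(hw, &lkup, 1, &rinfo, &added);
    if (!status)
        /* added.rid and added.rule_id identify the rule for removal */
        status = ice_rem_adv_rule_by_id(hw, &added);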
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
new file mode 100644
index 000000000000..725caa160b13
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_tc_lib.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+
+/**
+ * ice_tc_count_lkups - determine lookup count for switch filter
+ * @flags: TC-flower flags
+ * @headers: Pointer to TC flower filter header structure
+ * @fltr: Pointer to outer TC filter structure
+ *
+ * Determine lookup count based on TC flower input for switch filter.
+ */
+static int
+ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int lkups_cnt = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
+ lkups_cnt++;
+
+ /* are MAC fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
+ lkups_cnt++;
+
+ /* is VLAN specified? */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ lkups_cnt++;
+
+ /* are IPv[4|6] fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4))
+ lkups_cnt++;
+ else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6))
+ lkups_cnt++;
+
+ /* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT))
+ lkups_cnt++;
+
+ return lkups_cnt;
+}
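As a worked example (illustrative only; headers and fltr are the caller's hypothetical parse results), a filter matching a destination MAC, a destination IPv4 address and a TCP destination port contributes one lookup element per group:

    u32 flags = ICE_TC_FLWR_FIELD_DST_MAC |
                ICE_TC_FLWR_FIELD_DEST_IPV4 |
                ICE_TC_FLWR_FIELD_DEST_L4_PORT;
    /* MAC, IPv4 and L4 port groups each count once */
    int lkups_cnt = ice_tc_count_lkups(flags, headers, fltr); /* 3 */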
+
+/**
+ * ice_tc_fill_rules - fill filter rules based on TC fltr
+ * @hw: pointer to HW structure
+ * @flags: tc flower field flags
+ * @tc_fltr: pointer to TC flower filter
+ * @list: list of advanced rule elements
+ * @rule_info: pointer to information about rule
+ * @l4_proto: pointer to information such as L4 proto type
+ *
+ * Fill the ice_adv_lkup_elem list based on the TC flower flags and
+ * headers. This list should be used to add an advanced filter in
+ * hardware.
+ */
+static int
+ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
+ struct ice_tc_flower_fltr *tc_fltr,
+ struct ice_adv_lkup_elem *list,
+ struct ice_adv_rule_info *rule_info,
+ u16 *l4_proto)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ int i = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ICE_ETYPE_OL;
+ list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
+ list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC)) {
+ struct ice_tc_l2_hdr *l2_key, *l2_mask;
+
+ l2_key = &headers->l2_key;
+ l2_mask = &headers->l2_mask;
+
+ list[i].type = ICE_MAC_OFOS;
+ if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ l2_key->dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ l2_mask->dst_mac);
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
+ l2_key->src_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
+ l2_mask->src_mac);
+ }
+ i++;
+ }
+
+ /* copy VLAN info */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].type = ICE_VLAN_OFOS;
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ i++;
+ }
+
+ /* copy L3 (IPv[4|6]: src, dest) address */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_SRC_IPV4)) {
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ICE_IPV4_OFOS;
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
+ list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
+ list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
+ list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
+ list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
+ }
+ i++;
+ } else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6)) {
+ struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ICE_IPV6_OFOS;
+ ipv6_hdr = &list[i].h_u.ipv6_hdr;
+ ipv6_mask = &list[i].m_u.ipv6_hdr;
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
+ memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
+ sizeof(l3_key->dst_ipv6_addr));
+ memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
+ sizeof(l3_mask->dst_ipv6_addr));
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
+ memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
+ sizeof(l3_key->src_ipv6_addr));
+ memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
+ sizeof(l3_mask->src_ipv6_addr));
+ }
+ i++;
+ }
+
+ /* copy L4 (src, dest) port */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
+ struct ice_tc_l4_hdr *l4_key, *l4_mask;
+
+ l4_key = &headers->l4_key;
+ l4_mask = &headers->l4_mask;
+ if (headers->l3_key.ip_proto == IPPROTO_TCP) {
+ list[i].type = ICE_TCP_IL;
+ /* detected L4 proto is TCP */
+ if (l4_proto)
+ *l4_proto = IPPROTO_TCP;
+ } else if (headers->l3_key.ip_proto == IPPROTO_UDP) {
+ list[i].type = ICE_UDP_ILOS;
+ /* detected L4 proto is UDP */
+ if (l4_proto)
+ *l4_proto = IPPROTO_UDP;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
+ list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
+ list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
+ list[i].h_u.l4_hdr.src_port = l4_key->src_port;
+ list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
+ }
+ i++;
+ }
+
+ return i;
+}
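Continuing the same hypothetical example, ice_tc_fill_rules() then populates one element per counted group, in a fixed order, so the returned index matches the earlier count:

    /* list was kcalloc'd with lkups_cnt entries by the caller */
    i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, &l4_proto);
    /* list[0].type == ICE_MAC_OFOS, list[1].type == ICE_IPV4_OFOS,
     * list[2].type == ICE_TCP_IL (ip_proto is TCP); i == lkups_cnt == 3
     */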
+
+static int
+ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ struct ice_repr *repr;
+
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+ break;
+
+ case FLOW_ACTION_REDIRECT:
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+ if (ice_is_port_repr_netdev(act->dev)) {
+ repr = ice_netdev_to_repr(act->dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else if (netif_is_ice(act->dev)) {
+ struct ice_netdev_priv *np = netdev_priv(act->dev);
+
+ fltr->dest_vsi = np->vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ break;
+
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data rule_added;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_adv_lkup_elem *list;
+ u32 flags = fltr->flags;
+ enum ice_status status;
+ int lkups_cnt;
+ int ret = 0;
+ int i;
+
+ if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = fltr->action.fltr_act;
+ if (fltr->action.fltr_act != ICE_DROP_PACKET)
+ rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
+ /* For now, use the highest priority; it also becomes the priority
+ * of the recipe that gets created as a result of the new extraction
+ * sequence based on the input set.
+ * Priority '7' is the max value for a switch recipe; the higher the
+ * number, the earlier the switch rule is evaluated.
+ */
+ rule_info.priority = 7;
+
+ if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ rule_info.flags_info.act_valid = true;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = fltr->cookie;
+
+ status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
+ ret = -EINVAL;
+ goto exit;
+ } else if (status) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ fltr->rid = rule_added.rid;
+ fltr->rule_id = rule_added.rule_id;
+
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_add_tc_flower_adv_fltr - add appropriate filter rules
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to TC flower filter structure
+ *
+ * Add filter rules based on the filter parameters, using advanced
+ * recipes supported by the OS package.
+ */
+static int
+ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = {0};
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_lkup_elem *list;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 flags = tc_fltr->flags;
+ struct ice_vsi *ch_vsi;
+ struct device *dev;
+ u16 lkups_cnt = 0;
+ u16 l4_proto = 0;
+ int ret = 0;
+ u16 i = 0;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ /* get the channel (aka ADQ VSI) */
+ if (tc_fltr->dest_vsi)
+ ch_vsi = tc_fltr->dest_vsi;
+ else
+ ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
+ if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
+ if (!ch_vsi) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.priority = 7;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
+ tc_fltr->action.tc_class,
+ rule_info.sw_act.vsi_handle, lkups_cnt);
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = tc_fltr->cookie;
+
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (ret == -EEXIST) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter because it already exist");
+ ret = -EINVAL;
+ goto exit;
+ } else if (ret) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ tc_fltr->rid = rule_added.rid;
+ tc_fltr->rule_id = rule_added.rule_id;
+ if (tc_fltr->action.tc_class > 0 && ch_vsi) {
+ /* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
+ * for PF ADQ filter, it is not yet set in tc_fltr,
+ * hence store the dest_vsi ptr in tc_fltr
+ */
+ if (ch_vsi->type == ICE_VSI_CHNL)
+ tc_fltr->dest_vsi = ch_vsi;
+ /* keep track of advanced switch filter for
+ * destination VSI (channel VSI)
+ */
+ ch_vsi->num_chnl_fltr++;
+ /* in this case, dest_id is VSI handle (sw handle) */
+ tc_fltr->dest_id = rule_added.vsi_handle;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs++;
+ }
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
+ lkups_cnt, flags,
+ tc_fltr->action.tc_class, rule_added.rid,
+ rule_added.rule_id, rule_added.vsi_handle);
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ */
+static int
+ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match->key->dst) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
+ headers->l3_key.dst_ipv4 = match->key->dst;
+ headers->l3_mask.dst_ipv4 = match->mask->dst;
+ }
+ if (match->key->src) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
+ headers->l3_key.src_ipv4 = match->key->src;
+ headers->l3_mask.src_ipv4 = match->mask->src;
+ }
+ return 0;
+}
+
+/**
+ * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ */
+static int
+ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ /* src and dest IPv6 addresses should not be LOOPBACK
+ * (0:0:0:0:0:0:0:1), which can be represented as ::1
+ */
+ if (ipv6_addr_loopback(&match->key->dst) ||
+ ipv6_addr_loopback(&match->key->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
+ return -EINVAL;
+ }
+ /* error if both the src and dest IPv6 addresses are wildcards */
+ if (ipv6_addr_any(&match->mask->dst) &&
+ ipv6_addr_any(&match->mask->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
+ return -EINVAL;
+ }
+ if (!ipv6_addr_any(&match->mask->dst))
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
+ if (!ipv6_addr_any(&match->mask->src))
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
+
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (fltr->flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
+ memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
+ sizeof(match->key->src.s6_addr));
+ memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
+ sizeof(match->mask->src.s6_addr));
+ }
+ if (fltr->flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
+ memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
+ sizeof(match->key->dst.s6_addr));
+ memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
+ sizeof(match->mask->dst.s6_addr));
+ }
+
+ return 0;
+}
+
+/**
+ * ice_tc_set_port - Parse ports from TC flower filter
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ */
+static int
+ice_tc_set_port(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match.key->dst) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+ headers->l4_key.dst_port = match.key->dst;
+ headers->l4_mask.dst_port = match.mask->dst;
+ }
+ if (match.key->src) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+ headers->l4_key.src_port = match.key->src;
+ headers->l4_mask.src_port = match.mask->src;
+ }
+ return 0;
+}
+
+/**
+ * ice_parse_cls_flower - Parse TC flower filters provided by kernel
+ * @vsi: Pointer to the VSI
+ * @filter_dev: Pointer to device on which filter is being added
+ * @f: Pointer to struct flow_cls_offload
+ * @fltr: Pointer to filter structure
+ */
+static int
+ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
+ struct flow_dissector *dissector;
+
+ dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ } else {
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+ }
+
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
+ headers->l3_key.ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.key->dst)) {
+ ether_addr_copy(headers->l2_key.dst_mac,
+ match.key->dst);
+ ether_addr_copy(headers->l2_mask.dst_mac,
+ match.mask->dst);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+ if (!is_zero_ether_addr(match.key->src)) {
+ ether_addr_copy(headers->l2_key.src_mac,
+ match.key->src);
+ ether_addr_copy(headers->l2_mask.src_mac,
+ match.mask->src);
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
+ is_vlan_dev(filter_dev)) {
+ struct flow_dissector_key_vlan mask;
+ struct flow_dissector_key_vlan key;
+ struct flow_match_vlan match;
+
+ if (is_vlan_dev(filter_dev)) {
+ match.key = &key;
+ match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
+ match.key->vlan_priority = 0;
+ match.mask = &mask;
+ memset(match.mask, 0xff, sizeof(*match.mask));
+ match.mask->vlan_priority = 0;
+ } else {
+ flow_rule_match_vlan(rule, &match);
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
+ if (match.mask->vlan_priority)
+ headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (ice_tc_set_ipv4(&match, fltr, headers))
+ return -EINVAL;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (ice_tc_set_ipv6(&match, fltr, headers))
+ return -EINVAL;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if (ice_tc_set_port(match, fltr, headers))
+ return -EINVAL;
+ switch (headers->l3_key.ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_switch_fltr - Add TC flower filters
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * Add filter in HW switch block
+ */
+static int
+ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
+ return -EOPNOTSUPP;
+
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+ return ice_eswitch_add_tc_fltr(vsi, fltr);
+
+ return ice_add_tc_flower_adv_fltr(vsi, fltr);
+}
+
+/**
+ * ice_handle_tclass_action - Support directing to a traffic class
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Support directing traffic to a traffic class
+ */
+static int
+ice_handle_tclass_action(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+ struct ice_vsi *main_vsi;
+
+ if (tc < 0) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
+ return -EINVAL;
+ }
+ if (!tc) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
+ if (!(vsi->all_enatc & BIT(tc))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of nonexistent destination");
+ return -EINVAL;
+ }
+
+ /* Redirect to a TC class or Queue Group */
+ main_vsi = ice_get_main_vsi(vsi->back);
+ if (!main_vsi || !main_vsi->netdev) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of invalid netdevice");
+ return -EINVAL;
+ }
+
+ if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
+ return -EOPNOTSUPP;
+ }
+
+ /* For ADQ, the filter must include a dest MAC address; otherwise
+ * unwanted packets with unrelated MAC addresses get delivered to ADQ
+ * VSIs as long as the remaining filter criteria (such as dest IP
+ * address and dest/src L4 port) are satisfied. The following code
+ * handles two cases:
+ * 1. For non-tunnel, if the user specified MAC addresses, use them
+ * (meaning this code does nothing).
+ * 2. For non-tunnel, if the user didn't specify a MAC address, add an
+ * implicit dest MAC set to the lower netdev's active unicast MAC
+ * address.
+ */
+ if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
+ ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->netdev->dev_addr);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+ /* validate the specified dest MAC address; make sure it belongs either
+ * to the lower netdev or to one of the MACVLANs. MACVLAN MAC addresses
+ * are added as unicast MAC filters destined to the main VSI.
+ */
+ if (!ice_mac_fltr_exist(&main_vsi->back->hw,
+ fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+
+ /* Make sure VLAN is already added to main VSI, before allowing ADQ to
+ * add a VLAN based filter such as MAC + VLAN + L4 port.
+ */
+ if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
+ u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
+
+ if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ fltr->action.tc_class = tc;
+
+ return 0;
+}
+
+/**
+ * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Parse the actions for a TC filter
+ */
+static int
+ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_action *flow_action = &rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ if (cls_flower->classid)
+ return ice_handle_tclass_action(vsi, cls_flower, fltr);
+
+ if (!flow_action_has_entries(flow_action))
+ return -EINVAL;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ int err = ice_eswitch_tc_parse_action(fltr, act);
+
+ if (err)
+ return err;
+ continue;
+ }
+ /* Allow only one rule per filter */
+
+ /* Drop action */
+ if (act->id == FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
+ return -EINVAL;
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ }
+ return 0;
+}
+
+/**
+ * ice_del_tc_fltr - deletes a filter from HW table
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function deletes a filter from HW table and manages book-keeping
+ */
+static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_rule_query_data rule_rem;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ rule_rem.rid = fltr->rid;
+ rule_rem.rule_id = fltr->rule_id;
+ rule_rem.vsi_handle = fltr->dest_id;
+ err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
+ if (err) {
+ if (err == ICE_ERR_DOES_NOT_EXIST) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
+ return -ENOENT;
+ }
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
+ return -EIO;
+ }
+
+ /* update advanced switch filter count for destination
+ * VSI if filter destination was VSI
+ */
+ if (fltr->dest_vsi) {
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ fltr->dest_vsi->num_chnl_fltr--;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_tc_fltr - adds a TC flower filter
+ * @netdev: Pointer to netdev
+ * @vsi: Pointer to VSI
+ * @f: Pointer to flower offload structure
+ * @__fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function parses TC-flower input fields, parses action,
+ * and adds a filter.
+ */
+static int
+ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr **__fltr)
+{
+ struct ice_tc_flower_fltr *fltr;
+ int err;
+
+ /* by default, set output to be INVALID */
+ *__fltr = NULL;
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ fltr->cookie = f->cookie;
+ fltr->extack = f->common.extack;
+ fltr->src_vsi = vsi;
+ INIT_HLIST_NODE(&fltr->tc_flower_node);
+
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_parse_tc_flower_actions(vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_add_switch_fltr(vsi, fltr);
+ if (err < 0)
+ goto err;
+
+ /* return the newly created filter */
+ *__fltr = fltr;
+
+ return 0;
+err:
+ kfree(fltr);
+ return err;
+}
+
+/**
+ * ice_find_tc_flower_fltr - Find the TC flower filter in the list
+ * @pf: Pointer to PF
+ * @cookie: filter specific cookie
+ */
+static struct ice_tc_flower_fltr *
+ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (cookie == fltr->cookie)
+ return fltr;
+
+ return NULL;
+}
+
+/**
+ * ice_add_cls_flower - add TC flower filters
+ * @netdev: Pointer to filter device
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to flower offload structure
+ */
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower)
+{
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct net_device *vsi_netdev = vsi->netdev;
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ return -EINVAL;
+
+ if (ice_is_port_repr_netdev(netdev))
+ vsi_netdev = netdev;
+
+ if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
+ !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
+ /* Based on TC indirect notifications from the kernel, all ice
+ * devices get an instance of the rule from the higher-level device.
+ * Avoid triggering an explicit error in this case.
+ */
+ if (netdev == vsi_netdev)
+ NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
+ return -EINVAL;
+ }
+
+ /* avoid duplicate entries; if one already exists, return an error */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (fltr) {
+ NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
+ return -EEXIST;
+ }
+
+ /* prep and add TC-flower filter in HW */
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ if (err)
+ return err;
+
+ /* add filter into an ordered list */
+ hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
+ return 0;
+}
+
+/**
+ * ice_del_cls_flower - delete TC flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ */
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* find filter */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (!fltr) {
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
+ hlist_empty(&pf->tc_flower_fltr_list))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because it could not be found");
+ return -EINVAL;
+ }
+
+ fltr->extack = cls_flower->common.extack;
+ /* delete filter from HW */
+ err = ice_del_tc_fltr(vsi, fltr);
+ if (err)
+ return err;
+
+ /* delete filter from an ordered list */
+ hlist_del(&fltr->tc_flower_node);
+
+ /* free the filter node */
+ kfree(fltr);
+
+ return 0;
+}
+
+/**
+ * ice_replay_tc_fltrs - replay TC filters
+ * @pf: pointer to PF struct
+ */
+void ice_replay_tc_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ fltr->extack = NULL;
+ ice_add_switch_fltr(fltr->src_vsi, fltr);
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
new file mode 100644
index 000000000000..ee9b284fcc02
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_TC_LIB_H_
+#define _ICE_TC_LIB_H_
+
+#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
+#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
+#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
+#define ICE_TC_FLWR_FIELD_DEST_IPV4 BIT(3)
+#define ICE_TC_FLWR_FIELD_SRC_IPV4 BIT(4)
+#define ICE_TC_FLWR_FIELD_DEST_IPV6 BIT(5)
+#define ICE_TC_FLWR_FIELD_SRC_IPV6 BIT(6)
+#define ICE_TC_FLWR_FIELD_DEST_L4_PORT BIT(7)
+#define ICE_TC_FLWR_FIELD_SRC_L4_PORT BIT(8)
+#define ICE_TC_FLWR_FIELD_TENANT_ID BIT(9)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 BIT(10)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 BIT(11)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 BIT(12)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 BIT(13)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT BIT(14)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
+#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
+#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+
+struct ice_tc_flower_action {
+ u32 tc_class;
+ enum ice_sw_fwd_act_type fltr_act;
+};
+
+struct ice_tc_vlan_hdr {
+ __be16 vlan_id; /* Only last 12 bits valid */
+ u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+};
+
+struct ice_tc_l2_hdr {
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ __be16 n_proto; /* Ethernet Protocol */
+};
+
+struct ice_tc_l3_hdr {
+ u8 ip_proto; /* IPPROTO value */
+ union {
+ struct {
+ struct in_addr dst_ip;
+ struct in_addr src_ip;
+ } v4;
+ struct {
+ struct in6_addr dst_ip6;
+ struct in6_addr src_ip6;
+ } v6;
+ } ip;
+#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
+#define dst_ipv6_addr ip.v6.dst_ip6.s6_addr
+#define src_ipv6 ip.v6.src_ip6.s6_addr32
+#define src_ipv6_addr ip.v6.src_ip6.s6_addr
+#define dst_ipv4 ip.v4.dst_ip.s_addr
+#define src_ipv4 ip.v4.src_ip.s_addr
+
+ u8 tos;
+ u8 ttl;
+};
+
+struct ice_tc_l4_hdr {
+ __be16 dst_port;
+ __be16 src_port;
+};
+
+struct ice_tc_flower_lyr_2_4_hdrs {
+ /* L2 layer fields with their mask */
+ struct ice_tc_l2_hdr l2_key;
+ struct ice_tc_l2_hdr l2_mask;
+ struct ice_tc_vlan_hdr vlan_hdr;
+ /* L3 (IPv4[6]) layer fields with their mask */
+ struct ice_tc_l3_hdr l3_key;
+ struct ice_tc_l3_hdr l3_mask;
+
+ /* L4 layer fields with their mask */
+ struct ice_tc_l4_hdr l4_key;
+ struct ice_tc_l4_hdr l4_mask;
+};
+
+enum ice_eswitch_fltr_direction {
+ ICE_ESWITCH_FLTR_INGRESS,
+ ICE_ESWITCH_FLTR_EGRESS,
+};
+
+struct ice_tc_flower_fltr {
+ struct hlist_node tc_flower_node;
+
+ /* cookie becomes filter_rule_id if rule is added successfully */
+ unsigned long cookie;
+
+ /* add_adv_rule returns information like recipe ID, rule_id. Store
+ * those values since they are needed to remove advanced rule
+ */
+ u16 rid;
+ u16 rule_id;
+ /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon
+ * destination type
+ */
+ u16 dest_id;
+ /* if dest_id is vsi_idx, the destination VSI ptr needs to be stored too */
+ struct ice_vsi *dest_vsi;
+ /* direction of fltr for eswitch use case */
+ enum ice_eswitch_fltr_direction direction;
+
+ /* Parsed TC flower configuration params */
+ struct ice_tc_flower_lyr_2_4_hdrs outer_headers;
+ struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
+ struct ice_vsi *src_vsi;
+ __be32 tenant_id;
+ u32 flags;
+ struct ice_tc_flower_action action;
+
+ /* cache ptr which is used wherever needed to communicate netlink
+ * messages
+ */
+ struct netlink_ext_ack *extack;
+};
+
+/**
+ * ice_is_chnl_fltr - is this a valid channel filter
+ * @f: Pointer to tc-flower filter
+ *
+ * The criteria to determine whether a given filter is a valid channel
+ * filter is based on its "destination". If the destination is hw_tc (aka
+ * tc_class) and it is non-zero, then it is a valid channel (aka ADQ) filter.
+ */
+static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
+{
+ return !!f->action.tc_class;
+}
+
+/**
+ * ice_chnl_dmac_fltr_cnt - DMAC based CHNL filter count
+ * @pf: Pointer to PF
+ */
+static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
+{
+ return pf->num_dmac_chnl_fltrs;
+}
+
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+void ice_replay_tc_fltrs(struct ice_pf *pf);
+
+#endif /* _ICE_TC_LIB_H_ */
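To tie the flag bits to the header structures above, a hypothetical parsed filter matching a destination MAC plus TCP destination port 80 would end up populated roughly as follows (a sketch, not driver code; mac is assumed):

    struct ice_tc_flower_fltr f = {};

    ether_addr_copy(f.outer_headers.l2_key.dst_mac, mac);
    eth_broadcast_addr(f.outer_headers.l2_mask.dst_mac);
    f.outer_headers.l3_key.ip_proto = IPPROTO_TCP;
    f.outer_headers.l4_key.dst_port = cpu_to_be16(80);
    f.outer_headers.l4_mask.dst_port = cpu_to_be16(0xffff);
    f.flags = ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_DEST_L4_PORT;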
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 9bc0b8fdfc77..cf685247c07a 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -64,15 +64,15 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->rx.ring->netdev->name)),
+ __string(devname, q_vector->rx.rx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->rx.ring->netdev->name);),
+ __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->rx.ring->q_index,
+ __entry->q_vector->rx.rx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -91,15 +91,15 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->tx.ring->netdev->name)),
+ __string(devname, q_vector->tx.tx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->tx.ring->netdev->name);),
+ __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->tx.ring->q_index,
+ __entry->q_vector->tx.tx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -115,7 +115,7 @@ DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(ice_tx_template,
- TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc,
+ TP_PROTO(struct ice_tx_ring *ring, struct ice_tx_desc *desc,
struct ice_tx_buf *buf),
TP_ARGS(ring, desc, buf),
@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_tx_template, name, \
- TP_PROTO(struct ice_ring *ring, \
+ TP_PROTO(struct ice_tx_ring *ring, \
struct ice_tx_desc *desc, \
struct ice_tx_buf *buf), \
TP_ARGS(ring, desc, buf))
@@ -145,7 +145,7 @@ DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
DECLARE_EVENT_CLASS(ice_rx_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc),
@@ -161,12 +161,12 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc)
);
DECLARE_EVENT_CLASS(ice_rx_indicate_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -186,13 +186,13 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
);
DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb)
);
DECLARE_EVENT_CLASS(ice_xmit_template,
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb),
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb),
TP_ARGS(ring, skb),
@@ -210,7 +210,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_xmit_template, name, \
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb), \
TP_ARGS(ring, skb))
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6ee8e0032d52..bc3ba19dc88f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
+#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -13,6 +14,7 @@
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
+#include "ice_eswitch.h"
#define ICE_RX_HDR_SIZE 256
@@ -32,7 +34,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
struct ice_tx_buf *tx_buf, *first;
struct ice_fltr_desc *f_desc;
struct ice_tx_desc *tx_desc;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
struct device *dev;
dma_addr_t dma;
u32 td_cmd;
@@ -106,7 +108,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
* @tx_buf: the buffer to free
*/
static void
-ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
+ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
@@ -133,7 +135,7 @@ ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
/* tx_buf must be completely set up in the transmit path */
}
-static struct netdev_queue *txring_txq(const struct ice_ring *ring)
+static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
@@ -142,8 +144,9 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring)
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
-void ice_clean_tx_ring(struct ice_ring *tx_ring)
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
u16 i;
if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
@@ -162,8 +165,10 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
/* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ memset(tx_ring->desc, 0, size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -181,14 +186,18 @@ tx_skip_free:
*
* Free all transmit software resources
*/
-void ice_free_tx_ring(struct ice_ring *tx_ring)
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
+
ice_clean_tx_ring(tx_ring);
devm_kfree(tx_ring->dev, tx_ring->tx_buf);
tx_ring->tx_buf = NULL;
if (tx_ring->desc) {
- dmam_free_coherent(tx_ring->dev, tx_ring->size,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size,
tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -201,7 +210,7 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
+static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = ICE_DFLT_IRQ_WORK;
@@ -238,11 +247,8 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- if (ice_ring_is_xdp(tx_ring))
- page_frag_free(tx_buf->raw_buf);
- else
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -298,9 +304,6 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -329,9 +332,10 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
*
* Return 0 on success, negative on error
*/
-int ice_setup_tx_ring(struct ice_ring *tx_ring)
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -339,19 +343,19 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_buf);
tx_ring->tx_buf =
- devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
+ devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
GFP_KERNEL);
if (!tx_ring->tx_buf)
return -ENOMEM;
/* round up to nearest page */
- tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- PAGE_SIZE);
- tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
- tx_ring->size);
+ size);
goto err;
}
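Each allocation and free site now recomputes the descriptor-area size locally rather than reading a cached ring field. The ALIGN() arithmetic, as a sketch assuming the 16-byte ice_tx_desc layout (two __le64 words) and 4 KiB pages:

    u32 size;

    /* count = 512: 512 * 16 = 8192 bytes, already a page multiple */
    size = ALIGN(512 * sizeof(struct ice_tx_desc), PAGE_SIZE); /* 8192 */
    /* count = 100: 1600 bytes rounds up to a single page */
    size = ALIGN(100 * sizeof(struct ice_tx_desc), PAGE_SIZE); /* 4096 */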
@@ -370,9 +374,10 @@ err:
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
-void ice_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
u16 i;
/* ring already cleared, nothing to do */
@@ -417,7 +422,9 @@ rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ memset(rx_ring->desc, 0, size);
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
@@ -430,8 +437,10 @@ rx_skip_free:
*
* Free all receive software resources
*/
-void ice_free_rx_ring(struct ice_ring *rx_ring)
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ u32 size;
+
ice_clean_rx_ring(rx_ring);
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
@@ -441,7 +450,9 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
rx_ring->rx_buf = NULL;
if (rx_ring->desc) {
- dmam_free_coherent(rx_ring->dev, rx_ring->size,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(rx_ring->dev, size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -453,9 +464,10 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
*
* Return 0 on success, negative on error
*/
-int ice_setup_rx_ring(struct ice_ring *rx_ring)
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -463,19 +475,19 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
+ devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
/* round up to nearest page */
- rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
- PAGE_SIZE);
- rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
- rx_ring->size);
+ size);
goto err;
}
@@ -499,7 +511,7 @@ err:
}
static unsigned int
-ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -519,15 +531,15 @@ ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
- struct ice_ring *xdp_ring;
- int err, result;
+ int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -535,11 +547,14 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
return ICE_XDP_PASS;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
- if (result == ICE_XDP_CONSUMED)
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
+ err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+ if (err == ICE_XDP_CONSUMED)
goto out_failure;
- return result;
+ return err;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (err)
@@ -576,7 +591,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct ice_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -588,7 +603,14 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- xdp_ring = vsi->xdp_rings[queue_index];
+ if (static_branch_unlikely(&ice_xdp_locking_key)) {
+ queue_index %= vsi->num_xdp_txq;
+ xdp_ring = vsi->xdp_rings[queue_index];
+ spin_lock(&xdp_ring->tx_lock);
+ } else {
+ xdp_ring = vsi->xdp_rings[queue_index];
+ }
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -602,6 +624,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+
return nxmit;
}
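The ice_xdp_locking_key static branch covers systems with more CPUs than XDP Tx rings: the queue index wraps modulo vsi->num_xdp_txq, so several CPUs can land on the same ring and the per-ring tx_lock must serialize them. A rough sketch of the mapping, assuming 8 CPUs and vsi->num_xdp_txq == 4 (names reused from the surrounding code):

    /* CPUs 0-3 map to rings 0-3; CPUs 4-7 wrap back onto rings 0-3 */
    queue_index = smp_processor_id() % vsi->num_xdp_txq; /* CPU 6 -> ring 2 */
    xdp_ring = vsi->xdp_rings[queue_index];
    spin_lock(&xdp_ring->tx_lock);
    /* ... enqueue frames on the shared ring ... */
    spin_unlock(&xdp_ring->tx_lock);

In the common one-ring-per-CPU case the static key stays disabled and the lock calls are patched out entirely.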
@@ -614,7 +639,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
* reused.
*/
static bool
-ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
+ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -665,7 +690,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -794,7 +819,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
@@ -820,7 +845,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* Synchronizes page for reuse by the adapter
*/
static void
-ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -851,7 +876,7 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* for use by the CPU.
*/
static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
+ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
@@ -888,7 +913,7 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *
-ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
@@ -940,7 +965,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
@@ -1000,7 +1025,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@@ -1036,7 +1061,7 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
-ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
+ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
@@ -1060,11 +1085,12 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int offset = rx_ring->rx_offset;
+ struct ice_tx_ring *xdp_ring = NULL;
unsigned int xdp_res, xdp_xmit = 0;
struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
@@ -1077,6 +1103,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (xdp_prog)
+ xdp_ring = rx_ring->xdp_ring;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1140,11 +1170,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
goto construct_skb;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
@@ -1221,7 +1250,7 @@ construct_skb:
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
if (xdp_prog)
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
@@ -1230,6 +1259,41 @@ construct_skb:
return failure ? budget : (int)total_rx_pkts;
}
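+/**
+ * __ice_update_sample - fill a dim_sample from a ring container's counters
+ * @q_vector: the vector the container belongs to
+ * @rc: ring container (Tx or Rx) to aggregate packet and byte counts from
+ * @sample: dim_sample to fill in
+ * @is_tx: true when sampling Tx rings, false when sampling Rx rings
+ */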
+static void __ice_update_sample(struct ice_q_vector *q_vector,
+ struct ice_ring_container *rc,
+ struct dim_sample *sample,
+ bool is_tx)
+{
+ u64 packets = 0, bytes = 0;
+
+ if (is_tx) {
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_tx_ring(tx_ring, *rc) {
+ packets += tx_ring->stats.pkts;
+ bytes += tx_ring->stats.bytes;
+ }
+ } else {
+ struct ice_rx_ring *rx_ring;
+
+ ice_for_each_rx_ring(rx_ring, *rc) {
+ packets += rx_ring->stats.pkts;
+ bytes += rx_ring->stats.bytes;
+ }
+ }
+
+ dim_update_sample(q_vector->total_events, packets, bytes, sample);
+ sample->comp_ctr = 0;
+
+	/* if the dim sample is stale, i.e. it has not been updated for a
+	 * second or longer, force the algorithm to start measuring again.
+	 * This handles the common case of the scheduler switching to a
+	 * queue that has been idle; 1000 here is milliseconds.
+	 */
+ if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
+ rc->dim.state = DIM_START_MEASURE;
+}
+
/**
* ice_net_dim - Update net DIM algorithm
* @q_vector: the vector associated with the interrupt
@@ -1245,34 +1309,16 @@ static void ice_net_dim(struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
if (ITR_IS_DYNAMIC(tx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->tx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, tx, &dim_sample, true);
net_dim(&tx->dim, dim_sample);
}
if (ITR_IS_DYNAMIC(rx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->rx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, rx, &dim_sample, false);
net_dim(&rx->dim, dim_sample);
}
}
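/* Illustrative sketch, not part of this patch: net_dim() consumes the
 * sample and, once it settles on a new moderation profile, schedules
 * dim->work. A driver-side worker (the example_* names below are
 * assumptions, not driver code) then translates dim->profile_ix into an
 * ITR register write:
 *
 *	static void example_dim_work(struct work_struct *work)
 *	{
 *		struct dim *dim = container_of(work, struct dim, work);
 *
 *		example_write_itr(dim, example_profile_to_usecs(dim->profile_ix));
 *		dim->state = DIM_START_MEASURE;
 *	}
 */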
@@ -1299,15 +1345,14 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
}
/**
- * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
+ * ice_enable_interrupt - re-enable MSI-X interrupt
* @q_vector: the vector associated with the interrupt to enable
*
- * Update the net_dim() algorithm and re-enable the interrupt associated with
- * this vector.
- *
- * If the VSI is down, the interrupt will not be re-enabled.
+ * If the VSI is down, the interrupt will not be re-enabled. When
+ * enabling the interrupt, always clear wb_on_itr, and if write-back on
+ * ITR was in use, trigger a software interrupt to clean out internal
+ * state.
*/
-static void ice_update_ena_itr(struct ice_q_vector *q_vector)
+static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
struct ice_vsi *vsi = q_vector->vsi;
bool wb_en = q_vector->wb_on_itr;
@@ -1316,25 +1361,25 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
if (test_bit(ICE_DOWN, vsi->state))
return;
- /* When exiting WB_ON_ITR, let ITR resume its normal
- * interrupts-enabled path.
+	/* when exiting busy poll, trigger an ITR-delayed software interrupt
+	 * to catch any pending cleanups that might have been missed across
+	 * the interrupt state transition. If we were not in wb_on_itr mode,
+	 * leave ITR untouched and simply re-enable the interrupt.
+	 */
- if (wb_en)
+ if (!wb_en) {
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ } else {
q_vector->wb_on_itr = false;
- /* This will do nothing if dynamic updates are not enabled. */
- ice_net_dim(q_vector);
-
- /* net_dim() updates ITR out-of-band using a work item */
- itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
- /* trigger an immediate software interrupt when exiting
- * busy poll, to make sure to catch any pending cleanups
- * that might have been missed due to interrupt state
- * transition.
- */
- if (wb_en) {
+		/* this single write does two things: it selects the third
+		 * ITR index for software interrupt moderation, and it
+		 * triggers a software interrupt rate-limited to 20K
+		 * interrupts per second, which avoids a high interrupt load
+		 * when we frequently enter and exit polling.
+		 */
+ itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_SW_ITR_INDX_M |
+ ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
}
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
@@ -1387,18 +1432,24 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
{
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
bool clean_complete = true;
- struct ice_ring *ring;
int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_pool ?
- ice_clean_tx_irq_zc(ring, budget) :
- ice_clean_tx_irq(ring, budget);
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ bool wd;
+
+ if (tx_ring->xsk_pool)
+ wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ else if (ice_ring_is_xdp(tx_ring))
+ wd = true;
+ else
+ wd = ice_clean_tx_irq(tx_ring, budget);
if (!wd)
clean_complete = false;
@@ -1419,16 +1470,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
- ice_for_each_ring(ring, q_vector->rx) {
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
int cleaned;
/* A dedicated path for zero-copy allows making a single
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_pool ?
- ice_clean_rx_irq_zc(ring, budget_per_ring) :
- ice_clean_rx_irq(ring, budget_per_ring);
+ cleaned = rx_ring->xsk_pool ?
+ ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1447,10 +1498,12 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
- ice_update_ena_itr(q_vector);
- else
+ if (likely(napi_complete_done(napi, work_done))) {
+ ice_net_dim(q_vector);
+ ice_enable_interrupt(q_vector);
+ } else {
ice_set_wb_on_itr(q_vector);
+ }
return min_t(int, work_done, budget - 1);
}
@@ -1462,7 +1515,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*
* Returns -EBUSY if a stop is needed, else 0
*/
-static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
/* Memory barrier before checking head and tail */
@@ -1485,7 +1538,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -1504,7 +1557,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
* it and the length into the transmit descriptor.
*/
static void
-ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
+ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct ice_tx_offload_params *off)
{
u64 td_offset, td_tag, td_cmd;
@@ -1840,7 +1893,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
*/
static void
-ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -2146,7 +2199,7 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
* @off: Tx offload parameters
*/
static void
-ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
+ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
s8 idx;
@@ -2181,7 +2234,7 @@ ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t
-ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
@@ -2245,6 +2298,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
+ if (ice_is_switchdev_running(vsi->back))
+ ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2282,7 +2337,7 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
tx_ring = vsi->tx_rings[skb->queue_mapping];
@@ -2296,10 +2351,43 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
/**
+ * ice_get_dscp_up - return the UP/TC value for a SKB
+ * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
+ * @skb: SKB to query for info to determine UP/TC
+ *
+ * This function should only be called when the PF is in L3 DSCP PFC mode.
+ */
+static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
+{
+ u8 dscp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return dcbcfg->dscp_map[dscp];
+}
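/* Worked example (illustrative): an IPv4 packet with TOS byte 0xb8
 * carries its DSCP in the upper six bits, so 0xb8 >> 2 == 46 (the "EF"
 * codepoint), and ice_get_dscp_up() returns dcbcfg->dscp_map[46], i.e.
 * whatever UP/TC the administrator mapped DSCP 46 to.
 */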
+
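+/**
+ * ice_select_queue - select a Tx queue for the skb
+ * @netdev: network interface device structure
+ * @skb: buffer being transmitted
+ * @sb_dev: subordinate device, if any
+ *
+ * Presumably wired up as the netdev's select_queue callback: in L3 DSCP
+ * PFC mode, stamp skb->priority from the packet's DSCP field before the
+ * default queue pick runs.
+ */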
+u16
+ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *dcbcfg;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
+ skb->priority = ice_get_dscp_up(dcbcfg, skb);
+
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
+/**
* ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
* @tx_ring: tx_ring to clean
*/
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
struct ice_vsi *vsi = tx_ring->vsi;
s16 i = tx_ring->next_to_clean;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1e46e80f3d6f..c56dd1749903 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -13,6 +13,7 @@
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
+#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -154,7 +155,7 @@ struct ice_tx_buf {
struct ice_tx_offload_params {
u64 cd_qw1;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
@@ -164,17 +165,10 @@ struct ice_tx_offload_params {
};
struct ice_rx_buf {
- union {
- struct {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
- };
- struct {
- struct xdp_buff *xdp;
- };
- };
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -258,9 +252,9 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
-struct ice_ring {
+struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_ring *next; /* pointer to next ring in q_vector */
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -268,14 +262,13 @@ struct ice_ring {
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
union {
- struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp_buf;
};
/* CL2 - 2nd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
+ /* CL3 - 3rd cacheline starts here */
u16 q_index; /* Queue number of ring */
- u16 q_handle; /* Queue handle per TC */
-
- u8 ring_active:1; /* is ring online or not */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -284,63 +277,104 @@ struct ice_ring {
u16 next_to_use;
u16 next_to_clean;
u16 next_to_alloc;
+ u16 rx_offset;
+ u16 rx_buf_len;
/* stats structs */
+ struct ice_rxq_stats rx_stats;
struct ice_q_stats stats;
struct u64_stats_sync syncp;
- union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
- };
struct rcu_head rcu; /* to avoid race on free */
- DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+	/* CL4 - 4th cacheline starts here */
+ struct ice_channel *ch;
struct bpf_prog *xdp_prog;
+ struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- u16 rx_offset;
- /* CL3 - 3rd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
struct sk_buff *skb;
- /* CLX - the below items are only accessed infrequently and should be
- * in their own cache line if possible
- */
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ dma_addr_t dma; /* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u64 cached_phctime;
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 ptp_rx;
u8 flags;
+} ____cacheline_internodealigned_in_smp;
+
+struct ice_tx_ring {
+ /* CL1 - 1st cacheline starts here */
+ struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ u8 __iomem *tail;
+ struct ice_tx_buf *tx_buf;
+ struct ice_q_vector *q_vector; /* Backreference to associated vector */
+ struct net_device *netdev; /* netdev ring maps to */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
+ /* CL2 - 2nd cacheline starts here */
dma_addr_t dma; /* physical address of ring */
- unsigned int size; /* length of descriptor ring in bytes */
+ struct xsk_buff_pool *xsk_pool;
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_rs;
+ u16 next_dd;
+ u16 q_handle; /* Queue handle per TC */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 count; /* Number of descriptors */
+ u16 q_index; /* Queue number of ring */
+ /* stats structs */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ struct ice_txq_stats tx_stats;
+
+ /* CL3 - 3rd cacheline starts here */
+ struct rcu_head rcu; /* to avoid race on free */
+ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ struct ice_channel *ch;
+ struct ice_ptp_tx *tx_tstamps;
+ spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
- u16 rx_buf_len;
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- struct ice_ptp_tx *tx_tstamps;
- u64 cached_phctime;
- u8 ptp_rx:1;
- u8 ptp_tx:1;
+ u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}
-static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
+{
+ return !!ring->ch;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}
+enum ice_container_type {
+ ICE_RX_CONTAINER,
+ ICE_TX_CONTAINER,
+};
+
struct ice_ring_container {
/* head of linked-list of rings */
- struct ice_ring *ring;
+ union {
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ };
struct dim dim; /* data for net_dim algorithm */
u16 itr_idx; /* index in the interrupt vector */
/* this matches the maximum number of ITR bits, but in usec
@@ -349,6 +383,7 @@ struct ice_ring_container {
u16 itr_setting:13;
u16 itr_reserved:2;
u16 itr_mode:1;
+ enum ice_container_type type;
};
struct ice_coalesce_stored {
@@ -360,10 +395,13 @@ struct ice_coalesce_stored {
};
/* iterator for handling rings in ring container */
-#define ice_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+#define ice_for_each_rx_ring(pos, head) \
+ for (pos = (head).rx_ring; pos; pos = pos->next)
+
+#define ice_for_each_tx_ring(pos, head) \
+ for (pos = (head).tx_ring; pos; pos = pos->next)
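/* Usage sketch (illustrative, not part of the patch): the split macros
 * walk the per-type ring lists hanging off a vector's containers, e.g.:
 *
 *	static u64 example_vector_tx_packets(struct ice_q_vector *q_vector)
 *	{
 *		struct ice_tx_ring *tx_ring;
 *		u64 pkts = 0;
 *
 *		ice_for_each_tx_ring(tx_ring, q_vector->tx)
 *			pkts += tx_ring->stats.pkts;
 *
 *		return pkts;
 *	}
 */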
-static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2))
@@ -376,18 +414,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_clean_tx_ring(struct ice_ring *tx_ring);
-void ice_clean_rx_ring(struct ice_ring *rx_ring);
-int ice_setup_tx_ring(struct ice_ring *tx_ring);
-int ice_setup_rx_ring(struct ice_ring *rx_ring);
-void ice_free_tx_ring(struct ice_ring *tx_ring);
-void ice_free_rx_ring(struct ice_ring *rx_ring);
+u16
+ice_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 171397dcf00a..1dd7e84f41f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,13 +2,15 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_txrx_lib.h"
+#include "ice_eswitch.h"
+#include "ice_lib.h"
/**
* ice_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
*/
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -66,7 +68,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype)
{
struct ice_32b_rx_flex_desc_nic *nic_mdid;
@@ -93,7 +95,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
* skb->protocol must be set before this function is called
*/
static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
struct ice_rx_ptype_decoded decoded;
@@ -178,14 +180,15 @@ checksum_fail:
* other fields within the skb.
*/
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
+ (rx_ring, rx_desc));
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -203,7 +206,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
* gro receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
@@ -212,18 +215,67 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
}
/**
+ * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ */
+static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+{
+ unsigned int total_bytes = 0, total_pkts = 0;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *next_dd_desc;
+ u16 next_dd = xdp_ring->next_dd;
+ struct ice_tx_buf *tx_buf;
+ int i;
+
+ next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+ if (!(next_dd_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ return;
+
+ for (i = 0; i < ICE_TX_THRESH; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ total_bytes += tx_buf->bytecount;
+ /* normally tx_buf->gso_segs was taken but at this point
+ * it's always 1 for us
+ */
+ total_pkts++;
+
+ page_frag_free(tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ next_dd_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+ if (xdp_ring->next_dd > xdp_ring->count)
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_to_clean = ntc;
+ ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+}
+
+/**
* ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
* @data: packet data pointer
* @size: packet data size
* @xdp_ring: XDP ring for transmission
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
dma_addr_t dma;
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+ ice_clean_xdp_irq(xdp_ring);
+
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
xdp_ring->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
@@ -244,21 +296,26 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0);
- /* Make certain all of the status bits have been updated
- * before next_to_watch is written.
- */
- smp_wmb();
-
i++;
- if (i == xdp_ring->count)
+ if (i == xdp_ring->count) {
i = 0;
-
- tx_buf->next_to_watch = tx_desc;
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ }
xdp_ring->next_to_use = i;
+ if (i > xdp_ring->next_rs) {
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += ICE_TX_THRESH;
+ }
+
return ICE_XDP_TX;
}
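/* Walk-through (illustrative): with ICE_TX_THRESH == 32, next_rs starts
 * at 31, so the RS bit lands on descriptors 31, 63, 95, ... as
 * next_to_use advances past each threshold. Hardware therefore reports
 * completion once per 32 descriptors, and ice_clean_xdp_irq() only has
 * to test the DD bit of the single descriptor at next_dd to reclaim the
 * whole batch.
 */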
@@ -269,7 +326,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
*
* Returns negative on failure, 0 on success.
*/
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
@@ -281,22 +338,23 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
/**
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
- * @rx_ring: Rx ring
+ * @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch
*
* This function bumps the XDP Tx tail and/or flushes the redirect map, and
* should be called when a batch of packets has been processed in the
* napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
- struct ice_ring *xdp_ring =
- rx_ring->vsi->xdp_rings[rx_ring->q_index];
-
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
}
}
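/* Context note (an assumption based on how the key is used here): the
 * ice_xdp_locking_key static branch is expected to be enabled when XDP
 * Tx rings are shared between CPUs, in which case tail bumps must be
 * serialized with tx_lock; with a dedicated ring per CPU the key stays
 * disabled and the branches around the spin_lock calls are never taken.
 */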
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 05ac30752902..11b6c1601986 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -37,7 +37,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
*
* This function updates the XDP Tx ring tail register.
*/
-static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
@@ -46,14 +46,14 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype);
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index d33d1906103c..9e0c2923c62e 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -138,7 +138,9 @@ enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
+ ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SWITCHDEV_CTRL = 7,
};
struct ice_link_status {
@@ -569,6 +571,8 @@ struct ice_sched_vsi_info {
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
+ /* bw_t_info saves VSI BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */
@@ -604,7 +608,8 @@ struct ice_dcb_app_priority_table {
};
#define ICE_MAX_USER_PRIORITY 8
-#define ICE_DCBX_MAX_APPS 32
+#define ICE_DCBX_MAX_APPS 64
+#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -622,7 +627,14 @@ struct ice_dcbx_cfg {
struct ice_dcb_ets_cfg etscfg;
struct ice_dcb_ets_cfg etsrec;
struct ice_dcb_pfc_cfg pfc;
+#define ICE_QOS_MODE_VLAN 0x0
+#define ICE_QOS_MODE_DSCP 0x1
+ u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+	/* when a DSCP mapping is defined by the user, set its bit to 1 */
+ DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
+ u8 dscp_map[ICE_DSCP_NUM_VAL];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -668,6 +680,10 @@ struct ice_port_info {
struct ice_switch_info {
struct list_head vsi_list_map_head;
struct ice_sw_recipe *recp_list;
+ u16 prof_res_bm_init;
+ u16 max_used_prof_index;
+
+ DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
/* FW logging configuration */
@@ -903,6 +919,7 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ u16 io_expander_handle;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e93430ab37f1..a42eaf6f942e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,7 +5,9 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
#include "ice_flow.h"
+#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#define FIELD_SELECTOR(proto_hdr_field) \
@@ -251,7 +253,7 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
* ice_get_vf_vsi - get VF's VSI based on the stored index
* @vf: VF used to get VSI
*/
-static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
return vf->pf->vsi[vf->lan_vsi_idx];
}
@@ -412,7 +414,7 @@ static bool ice_is_vf_link_up(struct ice_vf *vf)
*
* send a link status message to a single VF
*/
-static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe = { 0 };
struct ice_hw *hw = &vf->pf->hw;
@@ -620,6 +622,8 @@ void ice_free_vfs(struct ice_pf *pf)
if (!pf->vf)
return;
+ ice_eswitch_release(pf);
+
while (test_and_set_bit(ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
@@ -828,7 +832,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -855,7 +859,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
@@ -882,6 +886,40 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host-configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
* @vf: VF to add MAC filters for
*
@@ -932,6 +970,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
enum ice_status status;
u8 broadcast[ETH_ALEN];
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status) {
@@ -1414,6 +1455,11 @@ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
if (ice_vf_rebuild_host_vlan_cfg(vf))
dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
/* rebuild aggregator node config for main VF VSI */
ice_vf_rebuild_aggregator_node_cfg(vsi);
}
@@ -1581,6 +1627,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vf_post_vsi_rebuild(vf);
}
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
ice_flush(hw);
clear_bit(ICE_VF_DIS, pf->state);
@@ -1593,7 +1643,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*
* Returns true if the PF or VF is disabled, false otherwise.
*/
-static bool ice_is_vf_disabled(struct ice_vf *vf)
+bool ice_is_vf_disabled(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
@@ -1711,6 +1761,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
}
ice_vf_post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ ice_eswitch_update_repr(vsi);
/* if the VF has been reset allow it to come up again */
if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
@@ -1894,6 +1946,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
*/
ice_vf_ctrl_invalidate_vsi(vf);
ice_vf_fdir_init(vf);
+
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
}
}
@@ -1960,6 +2014,11 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
}
clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret)
+ goto err_unroll_sriov;
+
return 0;
err_unroll_sriov:
@@ -2823,7 +2882,7 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf)
* disabled, and initialized so it can be configured and/or queried by a host
* administrator.
*/
-static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
struct ice_pf *pf;
@@ -3329,7 +3388,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+ struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
@@ -3802,6 +3861,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
}
/**
+ * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to check
+ *
+ * Only update the cached hardware MAC on delete, and only for legacy VF
+ * drivers, because we cannot guarantee the order/type of MAC messages
+ * coming from the VF driver.
+ */
+static void
+ice_update_legacy_cached_mac(struct ice_vf *vf,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
+ ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
+ return;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+}
+
+/**
* ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
* @vf: VF to update
* @vc_ether_addr: structure from VIRTCHNL with MAC to delete
@@ -3822,16 +3901,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
*/
eth_zero_addr(vf->dev_lan_addr.addr);
- /* only update cached hardware MAC for legacy VF drivers on delete
- * because we cannot guarantee order/type of MAC from the VF driver
- */
- if (ice_is_vc_addr_legacy(vc_ether_addr) &&
- !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
- ether_addr_copy(vf->dev_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- }
+ ice_update_legacy_cached_mac(vf, vc_ether_addr);
}
/**
@@ -4400,6 +4470,133 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
return ice_vsi_manage_vlan_stripping(vsi, false);
}
+static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
+ .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+};
+
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
+{
+ *ops = ice_vc_vf_dflt_ops;
+}
+
+static int
+ice_vc_repr_no_action_msg(struct ice_vf __always_unused *vf,
+ u8 __always_unused *msg)
+{
+ return 0;
+}
+
+/**
+ * ice_vc_repr_add_mac - add a MAC address for a VF with a port representor
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * When port representors are created, we do not add a MAC filter rule
+ * to firmware; we only cache the address so that the PF can report the
+ * same MAC as the VF.
+ */
+static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ pf = vf->pf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+
+ if (!is_unicast_ether_addr(mac_addr) ||
+ ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ continue;
+
+ if (vf->pf_set_mac) {
+ dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto handle_mac_exit;
+ }
+
+ ice_vfhw_mac_add(vf, &al->list[i]);
+ vf->num_mac++;
+ break;
+ }
+
+handle_mac_exit:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_repr_del_mac - respond with success for a MAC delete request
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * Respond with success so the normal VF flow is not broken; for legacy
+ * VF drivers, also try to update the cached MAC address.
+ */
+static int
+ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ ice_update_legacy_cached_mac(vf, &al->list[0]);
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_no_action(struct ice_vf __always_unused *vf)
+{
+ return 0;
+}
+
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
+{
+ ops->add_mac_addr_msg = ice_vc_repr_add_mac;
+ ops->del_mac_addr_msg = ice_vc_repr_del_mac;
+ ops->add_vlan_msg = ice_vc_repr_no_action_msg;
+ ops->remove_vlan_msg = ice_vc_repr_no_action_msg;
+ ops->ena_vlan_stripping = ice_vc_repr_no_action;
+ ops->dis_vlan_stripping = ice_vc_repr_no_action;
+ ops->cfg_promiscuous_mode_msg = ice_vc_repr_no_action_msg;
+}
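/* Usage sketch (hypothetical call site, not shown in this hunk): a VF
 * first gets the default dispatch table, then has it swapped in place
 * once the PF runs in switchdev mode with a port representor attached:
 *
 *	ice_vc_set_dflt_vf_ops(&vf->vc_ops);
 *	if (ice_is_eswitch_mode_switchdev(vf->pf))
 *		ice_vc_change_ops_to_repr(&vf->vc_ops);
 */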
+
/**
* ice_vc_process_vf_msg - Process request from VF
* @pf: pointer to the PF structure
@@ -4413,6 +4610,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
u16 msglen = event->msg_len;
+ struct ice_vc_vf_ops *ops;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
struct device *dev;
@@ -4436,6 +4634,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
goto error_handler;
}
+ ops = &vf->vc_ops;
+
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
@@ -4463,75 +4663,75 @@ error_handler:
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
- err = ice_vc_get_ver_msg(vf, msg);
+ err = ops->get_ver_msg(vf, msg);
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
- err = ice_vc_get_vf_res_msg(vf, msg);
+ err = ops->get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
- ice_vc_reset_vf_msg(vf);
+ ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
- err = ice_vc_add_mac_addr_msg(vf, msg);
+ err = ops->add_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_ETH_ADDR:
- err = ice_vc_del_mac_addr_msg(vf, msg);
+ err = ops->del_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- err = ice_vc_cfg_qs_msg(vf, msg);
+ err = ops->cfg_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
- err = ice_vc_ena_qs_msg(vf, msg);
+ err = ops->ena_qs_msg(vf, msg);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
- err = ice_vc_dis_qs_msg(vf, msg);
+ err = ops->dis_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
- err = ice_vc_request_qs_msg(vf, msg);
+ err = ops->request_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- err = ice_vc_cfg_irq_map_msg(vf, msg);
+ err = ops->cfg_irq_map_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- err = ice_vc_config_rss_key(vf, msg);
+ err = ops->config_rss_key(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- err = ice_vc_config_rss_lut(vf, msg);
+ err = ops->config_rss_lut(vf, msg);
break;
case VIRTCHNL_OP_GET_STATS:
- err = ice_vc_get_stats_msg(vf, msg);
+ err = ops->get_stats_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
+ err = ops->cfg_promiscuous_mode_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_VLAN:
- err = ice_vc_add_vlan_msg(vf, msg);
+ err = ops->add_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_VLAN:
- err = ice_vc_remove_vlan_msg(vf, msg);
+ err = ops->remove_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- err = ice_vc_ena_vlan_stripping(vf);
+ err = ops->ena_vlan_stripping(vf);
break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- err = ice_vc_dis_vlan_stripping(vf);
+ err = ops->dis_vlan_stripping(vf);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
- err = ice_vc_add_fdir_fltr(vf, msg);
+ err = ops->add_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER:
- err = ice_vc_del_fdir_fltr(vf, msg);
+ err = ops->del_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, true);
+ err = ops->handle_rss_cfg_msg(vf, msg, true);
break;
case VIRTCHNL_OP_DEL_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, false);
+ err = ops->handle_rss_cfg_msg(vf, msg, false);
break;
case VIRTCHNL_OP_UNKNOWN:
default:
@@ -4588,8 +4788,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->tx_rate;
- ivi->min_tx_rate = 0;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
return 0;
}
@@ -4699,6 +4899,11 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
struct ice_vf *vf;
int ret;
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ return -EOPNOTSUPP;
+ }
+
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
@@ -4763,6 +4968,122 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
}
/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ int rate = 0, i;
+
+ ice_for_each_vf(pf, i)
+ rate += pf->vf[i].min_tx_rate;
+
+ return rate;
+}
+
+/**
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of the
+ * total min_tx_rate based on the current link speed and all other VFs'
+ * configured min_tx_rate.
+ *
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
+ int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
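/* Worked example (illustrative): on a 25000 Mbps link where three other
 * VFs already reserve 8000 Mbps each (24000 Mbps total), a request for
 * min_tx_rate = 2000 on this VF fails, and the message reports
 * 24000 + 2000 - 25000 = 1000 Mbps of oversubscription.
 */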
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
+ */
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
+
+ vsi = ice_get_vf_vsi(vf);
+
+	/* a max_tx_rate of zero means no max Tx rate limiting, so the
+	 * min/max comparison is only meaningful when max_tx_rate is non-zero
+	 */
+ if (max_tx_rate && min_tx_rate > max_tx_rate) {
+ dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
+ min_tx_rate, max_tx_rate);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
+ return -EINVAL;
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+ return 0;
+}
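/* Typical path into this handler (assuming it is wired up as the
 * .ndo_set_vf_rate callback):
 *
 *	ip link set <pf-netdev> vf 0 min_tx_rate 1000 max_tx_rate 5000
 *
 * Rates arrive in Mbps; the "* 1000" above converts them to Kbps for
 * ice_set_min_bw_limit()/ice_set_max_bw_limit().
 */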
+
+/**
* ice_get_vf_stats - populate some stats for the VF
* @netdev: the netdev of the PF
* @vf_id: the host OS identifier (0-255)
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 842cb077df86..5ff93a08f54c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,6 +70,32 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+struct ice_vf;
+
+struct ice_vc_vf_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+};
+
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -99,7 +125,8 @@ struct ice_vf {
* the main LAN VSI for the PF.
*/
u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
u64 num_inval_msgs; /* number of continuous invalid msgs */
@@ -111,9 +138,17 @@ struct ice_vf {
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+
+ struct ice_vc_vf_ops vc_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
};
#ifdef CONFIG_PCI_IOV
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
@@ -124,6 +159,9 @@ void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
@@ -135,10 +173,18 @@ int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto);
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+
+bool ice_is_vf_disabled(struct ice_vf *vf);
+
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
@@ -164,6 +210,9 @@ static inline
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
+static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
@@ -171,6 +220,21 @@ static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
static inline bool
ice_is_malicious_vf(struct ice_pf __always_unused *pf,
struct ice_rq_event_info __always_unused *event,
@@ -245,6 +309,14 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
}
static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5a9f61deeb38..d9dfcfc2c6f9 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -67,7 +67,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @q_vector: queue vector
*/
static void
-ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
@@ -104,16 +104,17 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
ice_cfg_itr(hw, q_vector);
- ice_for_each_ring(ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
q_vector->tx.itr_idx);
- ice_for_each_ring(ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
q_vector->rx.itr_idx);
ice_flush(hw);
@@ -144,8 +145,9 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_txq_meta txq_meta = { };
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
int timeout = 50;
int err;
@@ -171,7 +173,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(&txq_meta, 0, sizeof(txq_meta));
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
@@ -201,8 +203,9 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 size;
int err;
@@ -225,7 +228,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
goto free_buf;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(qg_buf, 0, size);
qg_buf->num_txqs = 1;
@@ -233,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -360,56 +363,50 @@ xsk_pool_if_up:
*
* Returns true if all allocations were successful, false if any fail.
*/
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *rx_buf;
- bool ok = true;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- if (!count)
- return true;
-
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- rx_buf = &rx_ring->rx_buf[ntu];
+ xdp = &rx_ring->xdp_buf[ntu];
- do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!rx_buf->xdp) {
- ok = false;
- break;
- }
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
- dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
- rx_desc->wb.status_error0 = 0;
rx_desc++;
- rx_buf++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- rx_buf = rx_ring->rx_buf;
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
- ice_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->xdp_buf;
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.status_error0 = 0;
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
/**
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring
*/
-static void ice_bump_ntc(struct ice_ring *rx_ring)
+static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
{
int ntc = rx_ring->next_to_clean + 1;
@@ -421,19 +418,19 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @rx_buf: zero-copy Rx buffer
+ * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
{
- unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
- unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
- unsigned int datasize_hard = rx_buf->xdp->data_end -
- rx_buf->xdp->data_hard_start;
+ struct xdp_buff *xdp = *xdp_arr;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -441,13 +438,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- xsk_buff_free(rx_buf->xdp);
- rx_buf->xdp = NULL;
+ xsk_buff_free(xdp);
+ *xdp_arr = NULL;
return skb;
}
@@ -455,22 +452,18 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
int err, result = ICE_XDP_PASS;
- struct bpf_prog *xdp_prog;
- struct ice_ring *xdp_ring;
u32 act;
- /* ZC patch is enabled only when XDP program is set,
- * so here it can not be NULL
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
@@ -484,7 +477,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_PASS:
break;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
@@ -511,17 +503,25 @@ out_failure:
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
+ /* ZC path is enabled only when an XDP program is set,
+ * so it cannot be NULL here
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ xdp_ring = rx_ring->xdp_ring;
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -544,18 +544,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
+ xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
+ xsk_buff_set_size(*xdp, size);
+ xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
+ xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(rx_buf->xdp);
+ xsk_buff_free(*xdp);
- rx_buf->xdp = NULL;
+ *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
@@ -565,7 +565,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
}
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, rx_buf);
+ skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
break;
@@ -596,7 +596,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (cleaned_count >= ICE_RX_BUF_WRITE)
failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -618,7 +618,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
*
* Returns true if cleanup/transmission is done.
*/
-static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
{
struct ice_tx_desc *tx_desc = NULL;
bool work_done = true;
@@ -669,7 +669,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
* @tx_buf: Tx buffer to clean
*/
static void
-ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
@@ -684,7 +684,7 @@ ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
*
* Returns true if cleanup/transmission is done.
*/
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
{
int total_packets = 0, total_bytes = 0;
s16 ntc = xdp_ring->next_to_clean;
@@ -757,7 +757,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_q_vector *q_vector;
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
if (test_bit(ICE_DOWN, vsi->state))
return -ENETDOWN;
@@ -808,17 +808,17 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
* ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
u16 i;
for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
- if (!rx_buf->xdp)
+ if (!*xdp)
continue;
- rx_buf->xdp = NULL;
+ *xdp = NULL;
}
}
@@ -826,7 +826,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
* ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
u32 xsk_frames = 0;
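The ice_alloc_rx_bufs_zc() rework above replaces the one-buffer-at-a-time xsk_buff_alloc() loop with xsk_buff_alloc_batch(), which fills a contiguous run of slots and so cannot cross the ring wrap: the request is clamped to the room left before the wrap and any shortfall is made up on a later poll. A minimal stand-alone sketch of that pattern, using illustrative names rather than the driver's types:

#include <stddef.h>

struct demo_ring {
	size_t size;		/* number of descriptors */
	size_t next_to_use;
	void *bufs[64];
};

/* Stand-in for xsk_buff_alloc_batch(): fills up to n consecutive slots. */
static size_t demo_alloc_batch(void **slot, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		slot[i] = (void *)1;	/* placeholder buffer */
	return n;
}

/* Clamp to the room before the wrap, allocate one contiguous batch,
 * then advance next_to_use and reset it at the ring end. */
static size_t demo_refill(struct demo_ring *r, size_t count)
{
	size_t n = count;

	if (n > r->size - r->next_to_use)
		n = r->size - r->next_to_use;
	n = demo_alloc_batch(&r->bufs[r->next_to_use], n);
	r->next_to_use += n;
	if (r->next_to_use == r->size)
		r->next_to_use = 0;
	return n;	/* may be < count; the caller tops up next time */
}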
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index ea208808623a..4c7bd8e9dfc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -11,13 +11,13 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
#else
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
@@ -28,21 +28,21 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
}
static inline int
-ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
int __always_unused budget)
{
return 0;
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
int __always_unused budget)
{
return false;
}
static inline bool
-ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
return false;
@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { }
-static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { }
+static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
+static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
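The #else branch above shows the usual pattern for optional subsystems: the same names resolve to real functions when CONFIG_XDP_SOCKETS is set and to no-op static inline stubs otherwise, so callers stay free of ifdefs. A minimal imitation of the shape, with a made-up feature name:

/* feature_x.h -- CONFIG_FEATURE_X is a hypothetical Kconfig symbol */
#ifdef CONFIG_FEATURE_X
int feature_x_do(int arg);		/* implemented in feature_x.c */
#else
static inline int feature_x_do(int arg)
{
	return 0;			/* compiled out: quiet no-op */
}
#endif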
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 751de06019a0..e67a71c3f141 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3356,7 +3356,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
@@ -4988,7 +4988,7 @@ static int igb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
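This and the conversions that follow (igbvf, igc, ixgb, ixgbe, jme, the Marvell drivers, and others) replace direct memcpy() writes into netdev->dev_addr with eth_hw_addr_set(), so that every MAC address update funnels through one helper and the field can eventually be made const to drivers. The helper's approximate shape at the time of this series (see include/linux/etherdevice.h for the authoritative definition):

/* Approximate form only -- consult etherdevice.h for the real helper. */
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}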
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d32e72d953c8..74ccd622251a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1527,8 +1527,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
spin_unlock_bh(&hw->mbx_lock);
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
memcpy(netdev->perm_addr, adapter->hw.mac.addr,
netdev->addr_len);
}
@@ -1813,7 +1812,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
@@ -2816,8 +2815,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
}
spin_unlock_bh(&hw->mbx_lock);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 0e19b4d02e62..7ffb1045f00c 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -949,7 +949,7 @@ static int igc_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
@@ -6377,7 +6377,7 @@ static int igc_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0f021909b430..30568e3544cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -773,7 +773,7 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
{
-#if IS_ENABLED(CONFIG_X86_TSC)
+#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML)
return convert_art_ns_to_tsc(tstamp);
#else
return (struct system_counterval_t) { };
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index a430871d1c27..c8d1e815ec6b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -549,7 +549,7 @@ ixgb_mta_set(struct ixgb_hw *hw,
*****************************************************************************/
void
ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
+ const u8 *addr,
u32 index)
{
u32 rar_low, rar_high;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 6064583095da..70bcff5fb3db 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -740,7 +740,7 @@ bool ixgb_adapter_start(struct ixgb_hw *hw);
void ixgb_check_for_link(struct ixgb_hw *hw);
bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
+void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1588376d4c67..99d481904ce6 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -362,6 +362,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgb_adapter *adapter;
static int cards_found = 0;
int pci_using_dac;
+ u8 addr[ETH_ALEN];
int i;
int err;
@@ -461,7 +462,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
@@ -1030,7 +1032,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
@@ -2227,6 +2229,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
if (pci_enable_device(pdev)) {
netif_err(adapter, probe, adapter->netdev,
@@ -2250,7 +2253,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
"After reset, the EEPROM checksum is not valid\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a604552fa634..4a69823e6abd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -351,6 +351,7 @@ struct ixgbe_ring {
};
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
+ spinlock_t tx_lock; /* used in XDP mode */
struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
@@ -375,11 +376,13 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63
+DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -629,7 +632,7 @@ struct ixgbe_adapter {
/* XDP */
int num_xdp_queues;
- struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+ struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
/* TX */
@@ -772,6 +775,22 @@ struct ixgbe_adapter {
#endif /* CONFIG_IXGBE_IPSEC */
};
+static inline int ixgbe_determine_xdp_q_idx(int cpu)
+{
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ return cpu % IXGBE_MAX_XDP_QS;
+ else
+ return cpu;
+}
+
+static inline
+struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
+{
+ int index = ixgbe_determine_xdp_q_idx(smp_processor_id());
+
+ return adapter->xdp_ring[index];
+}
+
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
switch (adapter->hw.mac.type) {
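ixgbe_determine_xdp_q_idx() above encodes the new sharing policy: as long as nr_cpu_ids fits within IXGBE_MAX_XDP_QS every CPU keeps a private XDP ring, and once the static key is enabled the index wraps and rings become shared, which is what makes the per-ring tx_lock necessary. A runnable demonstration of just the index mapping, with a stand-in constant:

#include <stdio.h>

#define DEMO_MAX_XDP_QS 64	/* stand-in for IXGBE_MAX_XDP_QS */

static int demo_xdp_q_idx(int cpu, int locking)
{
	return locking ? cpu % DEMO_MAX_XDP_QS : cpu;
}

int main(void)
{
	/* 96 CPUs, 64 rings: CPU 70 shares ring 6 with CPU 6. */
	printf("cpu 70 -> ring %d\n", demo_xdp_q_idx(70, 1));
	return 0;
}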
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 0218f6c9b925..86b11164655e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -299,7 +299,10 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
- return adapter->xdp_prog ? nr_cpu_ids : 0;
+ int queues;
+
+ queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
+ return adapter->xdp_prog ? queues : 0;
}
#define IXGBE_RSS_64Q_MASK 0x3F
@@ -947,6 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->count = adapter->tx_ring_count;
ring->queue_index = xdp_idx;
set_ring_xdp(ring);
+ spin_lock_init(&ring->tx_lock);
/* assign ring to adapter */
WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
@@ -1032,6 +1036,9 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
__netif_napi_del(&q_vector->napi);
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ static_branch_dec(&ixgbe_xdp_locking_key);
+
/*
* after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 13c4782b920a..0f9f022260d7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
+DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+EXPORT_SYMBOL(ixgbe_xdp_locking_key);
+
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
@@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
@@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (ixgbe_init_rss_key(adapter))
return -ENOMEM;
- adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+ adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL);
if (!adapter->af_xdp_zc_qps)
return -ENOMEM;
@@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf)
{
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
u32 len, cmd_type;
@@ -8788,7 +8792,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
ixgbe_mac_set_default_filter(adapter);
@@ -10131,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return -EINVAL;
}
- if (nr_cpu_ids > MAX_XDP_QUEUES)
+ /* If the number of CPUs is much larger than the maximum number of
+ * XDP queues, bail out with -ENOMEM as before.
+ */
+ if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
return -ENOMEM;
+ else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+ static_branch_inc(&ixgbe_xdp_locking_key);
old_prog = xchg(&adapter->xdp_prog, prog);
need_reset = (!!prog != !!old_prog);
@@ -10199,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
writel(ring->next_to_use, ring->tail);
}
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
+{
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+}
+
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
@@ -10216,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+ ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
if (unlikely(!ring))
return -ENXIO;
if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
return -ENXIO;
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
- err = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ err = ixgbe_xmit_xdp_ring(ring, xdpf);
if (err != IXGBE_XDP_TX)
break;
nxmit++;
@@ -10236,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(flags & XDP_XMIT_FLUSH))
ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+
return nxmit;
}
@@ -10903,7 +10927,7 @@ skip_sriov:
eth_platform_get_mac_address(&adapter->pdev->dev,
adapter->hw.mac.perm_addr);
- memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
e_dev_err("invalid MAC address\n");
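Note that ixgbe_xdp_xmit() above takes the ring lock once around the whole frame loop, while the XDP_TX path in ixgbe_run_xdp() locks per packet; the static branch keeps both paths lock-free when every CPU owns its own ring. A self-contained sketch of the batch shape, with a pthread mutex standing in for the ring spinlock and static key:

#include <pthread.h>
#include <stdbool.h>

struct demo_ring {
	pthread_mutex_t tx_lock;
	int next_to_use;
};

static int demo_xmit_one(struct demo_ring *ring, void *frame)
{
	(void)frame;
	ring->next_to_use++;	/* stand-in for queueing a descriptor */
	return 0;
}

/* Lock once per batch -- the shape of ixgbe_xdp_xmit() -- rather than
 * once per frame as in the XDP_TX case. */
static int demo_xmit_batch(struct demo_ring *ring, void **frames, int n,
			   bool need_lock)
{
	int nxmit = 0;
	int i;

	if (need_lock)
		pthread_mutex_lock(&ring->tx_lock);
	for (i = 0; i < n; i++) {
		if (demo_xmit_one(ring, frames[i]) != 0)
			break;
		nxmit++;
	}
	if (need_lock)
		pthread_mutex_unlock(&ring->tx_lock);
	return nxmit;
}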
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 2aeec78029bc..a82533f21d36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -12,7 +12,7 @@
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf);
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
@@ -23,6 +23,7 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring);
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b1d22e4d5ec9..db2bc58dfcfd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -100,6 +100,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -120,7 +121,12 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -334,13 +340,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c714e1ecd308..d81811ab4ec4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2540,7 +2540,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -3054,7 +3054,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -4231,7 +4231,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
ether_addr_copy(hw->mac.addr, addr->sa_data);
ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1bdc4f23e1e5..439674fc9765 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -313,7 +313,7 @@ jme_load_macaddr(struct net_device *netdev)
val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
- memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
+ eth_hw_addr_set(netdev, macaddr);
spin_unlock_bh(&jme->macaddr_lock);
}
@@ -2254,7 +2254,7 @@ jme_set_macaddr(struct net_device *netdev, void *p)
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 3e9f324f1061..df9a8eefa007 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1297,8 +1297,8 @@ static int korina_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
- else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0)
+ eth_hw_addr_set(dev, mac_addr);
+ else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
eth_hw_addr_random(dev);
clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 62f8c5212182..2258e3f19161 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -96,6 +96,9 @@ struct ltq_etop_priv {
struct ltq_etop_chan ch[MAX_DMA_CHAN];
int tx_free[MAX_DMA_CHAN >> 1];
+ int tx_burst_len;
+ int rx_burst_len;
+
spinlock_t lock;
};
@@ -259,7 +262,7 @@ ltq_etop_hw_init(struct net_device *dev)
/* enable crc generation */
ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
- ltq_dma_init_port(DMA_PORT_ETOP);
+ ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);
for (i = 0; i < MAX_DMA_CHAN; i++) {
int irq = LTQ_DMA_CH0_INT + i;
@@ -472,8 +475,8 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- /* dma needs to start on a 16 byte aligned address */
- byte_offset = CPHYSADDR(skb->data) % 16;
+ /* dma needs to start on a burst-length-aligned address */
+ byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4);
ch->skb[ch->dma.desc] = skb;
netif_trans_update(dev);
@@ -667,6 +670,18 @@ ltq_etop_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
SET_NETDEV_DEV(dev, &pdev->dev);
+ err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
+ return err;
+ }
+
+ err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
+ return err;
+ }
+
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))
netif_napi_add(dev, &priv->ch[i].napi,
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index fb78f17d734f..ecf1e11d9b91 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -14,13 +14,15 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/if_vlan.h>
+
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <xway_dma.h>
/* DMA */
-#define XRX200_DMA_DATA_LEN 0x600
+#define XRX200_DMA_DATA_LEN (SZ_64K - 1)
#define XRX200_DMA_RX 0
#define XRX200_DMA_TX 1
@@ -71,6 +73,9 @@ struct xrx200_priv {
struct net_device *net_dev;
struct device *dev;
+ int tx_burst_len;
+ int rx_burst_len;
+
__iomem void *pmac_reg;
};
@@ -106,7 +111,8 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
+ ETH_FCS_LEN);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -154,19 +160,20 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- XRX200_DMA_DATA_LEN);
+ len);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
@@ -179,8 +186,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
return ret;
}
@@ -316,8 +322,8 @@ static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(priv->dev, mapping)))
goto err_drop;
- /* dma needs to start on a 16 byte aligned address */
- byte_offset = mapping % 16;
+ /* dma needs to start on a burst-length-aligned address */
+ byte_offset = mapping % (priv->tx_burst_len * 4);
desc->addr = mapping - byte_offset;
/* Make sure the address is written before we give it to HW */
@@ -340,10 +346,57 @@ err_drop:
return NETDEV_TX_OK;
}
+static int
+xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct xrx200_priv *priv = netdev_priv(net_dev);
+ struct xrx200_chan *ch_rx = &priv->chan_rx;
+ int old_mtu = net_dev->mtu;
+ bool running = false;
+ struct sk_buff *skb;
+ int curr_desc;
+ int ret = 0;
+
+ net_dev->mtu = new_mtu;
+
+ if (new_mtu <= old_mtu)
+ return ret;
+
+ running = netif_running(net_dev);
+ if (running) {
+ napi_disable(&ch_rx->napi);
+ ltq_dma_close(&ch_rx->dma);
+ }
+
+ xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
+ curr_desc = ch_rx->dma.desc;
+
+ for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
+ ch_rx->dma.desc++) {
+ skb = ch_rx->skb[ch_rx->dma.desc];
+ ret = xrx200_alloc_skb(ch_rx);
+ if (ret) {
+ net_dev->mtu = old_mtu;
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ ch_rx->dma.desc = curr_desc;
+ if (running) {
+ napi_enable(&ch_rx->napi);
+ ltq_dma_open(&ch_rx->dma);
+ ltq_dma_enable_irq(&ch_rx->dma);
+ }
+
+ return ret;
+}
+
static const struct net_device_ops xrx200_netdev_ops = {
.ndo_open = xrx200_open,
.ndo_stop = xrx200_close,
.ndo_start_xmit = xrx200_start_xmit,
+ .ndo_change_mtu = xrx200_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -369,7 +422,7 @@ static int xrx200_dma_init(struct xrx200_priv *priv)
int ret = 0;
int i;
- ltq_dma_init_port(DMA_PORT_ETOP);
+ ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);
ch_rx->dma.nr = XRX200_DMA_RX;
ch_rx->dma.dev = priv->dev;
@@ -453,7 +506,7 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
@@ -474,10 +527,22 @@ static int xrx200_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- err = of_get_mac_address(np, net_dev->dev_addr);
+ err = of_get_ethdev_address(np, net_dev);
if (err)
eth_hw_addr_random(net_dev);
+ err = device_property_read_u32(dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
+ if (err < 0) {
+ dev_err(dev, "unable to read tx-burst-length property\n");
+ return err;
+ }
+
+ err = device_property_read_u32(dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
+ if (err < 0) {
+ dev_err(dev, "unable to read rx-burst-length property\n");
+ return err;
+ }
+
/* bring up the dma engine and IP core */
err = xrx200_dma_init(priv);
if (err)
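Both Lantiq drivers now derive the DMA start alignment from the burst-length device properties (given in 32-bit words) instead of the old hard-coded 16 bytes: the mapping is rounded down to the alignment and the residue is carried in the descriptor. A runnable check of the arithmetic, assuming tx_burst_len counts words as above:

#include <stdio.h>
#include <stdint.h>

static uintptr_t demo_dma_start(uintptr_t mapping, int tx_burst_len)
{
	uintptr_t byte_offset = mapping % (uintptr_t)(tx_burst_len * 4);

	return mapping - byte_offset;	/* aligned address for desc->addr */
}

int main(void)
{
	/* burst length 8 words = 32-byte alignment: 0x1234 -> 0x1220 */
	printf("0x%lx\n", (unsigned long)demo_dma_start(0x1234, 8));
	return 0;
}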
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index 63bf01d28f0c..f99adbf26ab4 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF_NET
+ depends on OF
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index a9bdbf0dcfe1..3d9385a4989b 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -266,7 +266,7 @@ static int liteeth_probe(struct platform_device *pdev)
priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size;
priv->tx_slot = 0;
- err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 28d5ad296646..bb14fa2241a3 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1770,7 +1770,7 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
addr[5] = mac_l & 0xff;
}
-static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
+static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
{
wrlp(mp, MAC_ADDR_HIGH,
(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
@@ -1919,7 +1919,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
netif_addr_lock_bh(dev);
mv643xx_eth_program_unicast_filter(dev);
@@ -2925,10 +2925,14 @@ static void set_params(struct mv643xx_eth_private *mp,
struct net_device *dev = mp->dev;
unsigned int tx_ring_size;
- if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
- else
- uc_addr_get(mp, dev->dev_addr);
+ if (is_valid_ether_addr(pd->mac_addr)) {
+ eth_hw_addr_set(dev, pd->mac_addr);
+ } else {
+ u8 addr[ETH_ALEN];
+
+ uc_addr_get(mp, addr);
+ eth_hw_addr_set(dev, addr);
+ }
mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
if (pd->rx_queue_size)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d460a270601..98f276c617fb 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1623,8 +1623,8 @@ static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
}
/* Set mac address */
-static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
- int queue)
+static void mvneta_mac_addr_set(struct mvneta_port *pp,
+ const unsigned char *addr, int queue)
{
unsigned int mac_h;
unsigned int mac_l;
@@ -1914,7 +1914,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
}
/* Handle tx checksum */
-static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
@@ -2595,8 +2595,7 @@ err_drop_frame:
}
static inline void
-mvneta_tso_put_hdr(struct sk_buff *skb,
- struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
@@ -2604,7 +2603,7 @@ mvneta_tso_put_hdr(struct sk_buff *skb,
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
- tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ tx_desc->command = mvneta_skb_tx_csum(skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
@@ -2681,7 +2680,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- mvneta_tso_put_hdr(skb, pp, txq);
+ mvneta_tso_put_hdr(skb, txq);
while (data_left > 0) {
int size;
@@ -2799,7 +2798,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_cmd = mvneta_skb_tx_csum(pp, skb);
+ tx_cmd = mvneta_skb_tx_csum(skb);
tx_desc->data_size = skb_headlen(skb);
@@ -5242,14 +5241,14 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_ports;
}
- err = of_get_mac_address(dn, dev->dev_addr);
+ err = of_get_ethdev_address(dn, dev);
if (!err) {
mac_from = "device tree";
} else {
mvneta_get_mac_addr(pp, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
mac_from = "hardware";
- memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, hw_mac_addr);
} else {
mac_from = "random";
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index d5c92e43f89e..ad3be55cce68 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6081,9 +6081,9 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
char hw_mac_addr[ETH_ALEN] = {0};
char fw_mac_addr[ETH_ALEN];
- if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
*mac_from = "firmware node";
- ether_addr_copy(dev->dev_addr, fw_mac_addr);
+ eth_hw_addr_set(dev, fw_mac_addr);
return;
}
@@ -6091,7 +6091,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
mvpp21_get_mac_address(port, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
*mac_from = "hardware";
- ether_addr_copy(dev->dev_addr, hw_mac_addr);
+ eth_hw_addr_set(dev, hw_mac_addr);
return;
}
}
@@ -6301,12 +6301,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_XAUI:
case PHY_INTERFACE_MODE_NA:
if (mvpp2_port_supports_xlg(port)) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
}
if (state->interface != PHY_INTERFACE_MODE_NA)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 93575800ca92..75ba57bd1d46 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -2347,7 +2347,7 @@ int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
return err;
/* Set addr in the device */
- ether_addr_copy(dev->dev_addr, da);
+ eth_hw_addr_set(dev, da);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 34a089b71e55..186d00a9ab35 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
if (!cgx)
return;
- if (is_dev_rpm(cgx))
- return;
-
if (enable) {
/* Enable inbound PTP timestamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
@@ -1522,7 +1519,6 @@ static int cgx_lmac_exit(struct cgx *cgx)
int i;
if (cgx->cgx_cmd_workq) {
- flush_workqueue(cgx->cgx_cmd_workq);
destroy_workqueue(cgx->cgx_cmd_workq);
cgx->cgx_cmd_workq = NULL;
}
@@ -1545,9 +1541,11 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
if (is_dev_rpm(cgx))
- cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
else
- cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
static struct mac_ops cgx_mac_ops = {
@@ -1571,6 +1569,7 @@ static struct mac_ops cgx_mac_ops = {
.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d9bea13f15b8..8931864ee110 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -191,6 +191,7 @@ enum nix_scheduler {
#define NIX_CHAN_SDP_CH_START (0x700ull)
#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS 256
+#define NIX_CHAN_CPT_CH_START (0x800ull)
/* The mask is to extract lower 10-bits of channel number
* which CPT will pass to X2P.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index c38306b3384a..fc6e7423cbd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -102,6 +102,11 @@ struct mac_ops {
void (*mac_pause_frm_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 154877706a0e..4e79e918a161 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -84,7 +84,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0009)
+#define OTX2_MBOX_VERSION (0x000a)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
-M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
-M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
-M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
-M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
-M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
- cgx_set_link_mode_rsp) \
-M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
- cgx_features_info_msg) \
-M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
- cgx_mac_addr_add_rsp) \
-M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
msg_rsp) \
-M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
cgx_max_dmac_entries_get_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
-M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
@@ -186,9 +186,12 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
cpt_rd_wr_reg_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -229,6 +232,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
@@ -270,6 +275,10 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
@@ -284,10 +293,14 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
#undef M
};
@@ -575,10 +588,13 @@ struct cgx_mac_addr_update_req {
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
-#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
-#define RVU_MAC_VERSION BIT_ULL(2)
-#define RVU_MAC_CGX BIT_ULL(3)
-#define RVU_MAC_RPM BIT_ULL(4)
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control from physical link higig2 messages */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
struct cgx_features_info_msg {
struct mbox_msghdr hdr;
@@ -593,6 +609,22 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+ u8 pkind; /* valid only in case of custom flag */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+ u8 var_len_off_mask; /* Mask for length within offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -698,6 +730,8 @@ enum nix_af_status {
NIX_AF_ERR_INVALID_BANDPROF = -426,
NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
+ NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
+ NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
};
/* For NIX RX vtag action */
@@ -1065,6 +1099,40 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u8 opcode;
+ u16 param1;
+ u16 param2;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
struct nix_hw_info {
struct mbox_msghdr hdr;
u16 rsvs16;
@@ -1357,12 +1425,15 @@ struct npc_mcam_get_stats_rsp {
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
};
struct ptp_req {
struct mbox_msghdr hdr;
u8 op;
s64 scaled_ppm;
+ u64 thresh;
};
struct ptp_rsp {
@@ -1399,7 +1470,9 @@ enum cpt_af_status {
CPT_AF_ERR_LF_INVALID = -903,
CPT_AF_ERR_ACCESS_DENIED = -904,
CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
- CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
};
/* CPT mbox message formats */
@@ -1420,6 +1493,22 @@ struct cpt_lf_alloc_req_msg {
int blkaddr;
};
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+
+/* Mailbox message request format for CPT IPsec
+ * inline inbound and outbound configuration.
+ */
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u8 sso_pf_func_ovrd;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
/* Mailbox message request and response format for CPT stats. */
struct cpt_sts_req {
struct mbox_msghdr hdr;
@@ -1478,6 +1567,13 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
+/* Mailbox message request format to request for CPT_INST_S lmtst. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;
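The mbox ID renumbering and the new CPT/NIX messages above all flow through the MBOX_MESSAGES X-macro table: each M() row expands once into the enum of message IDs and again into handler glue. A stand-alone, runnable imitation of the enum expansion (table and names invented for illustration):

#include <stdio.h>

#define DEMO_MESSAGES				\
M(CPT_INLINE_IPSEC_CFG, 0xA04)			\
M(CPT_CTX_CACHE_SYNC,   0xA07)

enum {
#define M(_name, _id) MBOX_MSG_ ## _name = _id,
DEMO_MESSAGES
#undef M
};

int main(void)
{
	printf("0x%X\n", MBOX_MSG_CPT_CTX_CACHE_SYNC);	/* prints 0xA07 */
	return 0;
}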
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3a819b24accc..6e1192f52608 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -31,9 +31,9 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_HIGIG2_ETHER,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_LT_LA_CH_LEN_90B_ETHER,
NPC_LT_LA_CPT_HDR,
NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -148,10 +148,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
NPC_RX_CPT_HDR_PKIND,
@@ -162,6 +163,10 @@ enum npc_pkind_type {
NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
};
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+};
+
/* list of known and supported fields in packet header and
* fields present in key structure.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 588822a0cf21..1a8c5376297c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -176,9 +176,8 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
- NPC_S_KPU1_CUSTOM_L2_90B,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
NPC_S_KPU1_CPT_HDR,
- NPC_S_KPU1_CUSTOM_L2_24B,
NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
@@ -188,6 +187,8 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_NGIO,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -979,8 +980,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
0,
0, 0, 0, 0,
@@ -996,27 +997,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 36, 40, 44, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_24B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 40, 54, 58, 0, 0,
- NPC_S_KPU1_CPT_HDR, 0, 0,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 7, 7, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 102, 106, 110, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_90B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
0,
0, 0, 0, 0,
@@ -1711,7 +1712,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP,
0xffff,
0x0000,
@@ -1720,7 +1721,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP6,
0xffff,
0x0000,
@@ -1729,7 +1730,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ARP,
0xffff,
0x0000,
@@ -1738,7 +1739,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_RARP,
0xffff,
0x0000,
@@ -1747,7 +1748,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_PTP,
0xffff,
0x0000,
@@ -1756,7 +1757,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_FCOE,
0xffff,
0x0000,
@@ -1765,7 +1766,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
NPC_ETYPE_CTAG,
@@ -1774,7 +1775,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1783,7 +1784,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
0x0000,
@@ -1792,7 +1793,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -1801,7 +1802,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ETAG,
0xffff,
0x0000,
@@ -1810,7 +1811,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1819,7 +1820,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
0x0000,
@@ -1828,7 +1829,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1837,7 +1838,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1847,168 +1848,33 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
NPC_ETYPE_IP,
0xffff,
0x0000,
0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
0x0000,
0x0000,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
NPC_ETYPE_IP6,
0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
0x0000,
- 0xffff,
0x0000,
0x0000,
- NPC_ETYPE_CTAG,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
0x0000,
- NPC_ETYPE_QINQ,
- 0xffff,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
0x0000,
0x0000,
0x0000,
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_S_KPU1_CPT_HDR, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -2017,51 +1883,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ETAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU1_VLAN_EXDSA, 0xff,
NPC_ETYPE_CTAG,
0xffff,
@@ -3066,6 +2887,42 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7496,15 +7353,6 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU9_GTPU, 0xff,
- 0x0000,
- 0x0000,
NPC_GTP_PT_GTP | NPC_GTP_VER1,
NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
@@ -9192,159 +9040,127 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 60, 1,
+ NPC_S_KPU5_CPT_IP, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9352,7 +9168,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 60, 1,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9360,7 +9176,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 58, 1,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
@@ -9368,141 +9184,13 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 58, 1,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 0, 0, 1, 0,
NPC_S_KPU3_VLAN_EXDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -10395,6 +10083,38 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -14335,16 +14055,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_G_PDU,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
+ 8, 0, 6, 2, 1,
+ NPC_S_NA, 0, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
0,
0, 0, 0, 0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 9b8e59f4c206..d6321de3cc17 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -27,54 +27,29 @@
#define PCI_DEVID_CN10K_PTP 0xA09E
#define PCI_PTP_BAR_NO 0
-#define PCI_RST_BAR_NO 0
#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
-
-#define RST_BOOT 0x1600ULL
-#define RST_MUL_BITS GENMASK_ULL(38, 33)
-#define CLOCK_BASE_RATE 50000000ULL
+#define PTP_TIMESTAMP 0xF20ULL
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
-static u64 get_clock_rate(void)
-{
- u64 cfg, ret = CLOCK_BASE_RATE * 16;
- struct pci_dev *pdev;
- void __iomem *base;
-
- /* To get the input clock frequency with which PTP co-processor
- * block is running the base frequency(50 MHz) needs to be multiplied
- * with multiplier bits present in RST_BOOT register of RESET block.
- * Hence below code gets the multiplier bits from the RESET PCI
- * device present in the system.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_RST, NULL);
- if (!pdev)
- goto error;
-
- base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
- if (!base)
- goto error_put_pdev;
-
- cfg = readq(base + RST_BOOT);
- ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
-
- iounmap(base);
-
-error_put_pdev:
- pci_dev_put(pdev);
-
-error:
- return ret;
-}
-
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -145,13 +120,74 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk)
return 0;
}
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+	/* Set 50% duty cycle for 1Hz output; the upper 32 bits of each
+	 * register hold the increment in nanoseconds (0x1dcd6500 == 500000000ns).
+	 */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
+
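The compensation value written above is nanoseconds-per-input-clock-cycle in 32.32 fixed point. A standalone check of that arithmetic, in plain userspace C with an assumed 1 GHz sclk:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t sclk_mhz = 1000;		/* assumed 1 GHz input clock */
	uint64_t rate = sclk_mhz * 1000000;
	uint64_t comp = (1000000000ULL << 32) / rate;

	/* 1 GHz means exactly 1ns per cycle, i.e. comp == 1ULL << 32 */
	printf("comp = 0x%" PRIx64 "\n", comp);
	return 0;
}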
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
- u64 clock_comp;
- u64 clock_cfg;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
@@ -172,17 +208,6 @@ static int ptp_probe(struct pci_dev *pdev,
ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
- ptp->clock_rate = get_clock_rate();
-
- /* Enable PTP clock */
- clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
- clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
-
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- /* Initial compensation value to start the nanosecs counter */
- writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
-
pci_set_drvdata(pdev, ptp);
if (!first_ptp_block)
first_ptp_block = ptp;
@@ -272,6 +297,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_GET_CLOCK:
err = ptp_get_clock(rvu->ptp, &rsp->clk);
break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 76d404b24552..1b81a0493cd3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -20,6 +20,7 @@ struct ptp {
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 07b0eafccad8..e695fa0e82a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -29,6 +29,7 @@ static struct mac_ops rpm_mac_ops = {
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -270,3 +271,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
return 0;
}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable)
+ cfg |= RPMX_RX_TS_PREPEND;
+ else
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index f0b069442dcc..57c8a687b488 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -14,6 +14,8 @@
#define PCI_DEVID_CN10K_RPM 0xA060
/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -54,4 +56,5 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause);
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 35836903b7fb..cb56e171ddd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -854,6 +854,7 @@ static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = NIX_PRIV_LFX_CFG;
block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NIX%d", blkid);
rvu->nix_blkaddr[blkid] = blkaddr;
return rvu_alloc_bitmap(&block->lf);
@@ -883,6 +884,7 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = CPT_PRIV_LFX_CFG;
block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "CPT%d", blkid);
return rvu_alloc_bitmap(&block->lf);
}
@@ -940,6 +942,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfcfg_reg = NPA_PRIV_LFX_CFG;
block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -979,6 +982,7 @@ nix:
block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1003,6 +1007,7 @@ ssow:
block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1028,6 +1033,7 @@ tim:
block->lfcfg_reg = TIM_PRIV_LFX_CFG;
block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1287,6 +1293,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
return (val & 0xFFF);
}
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int numlfs, total_lfs = 0, nr_blocks = 0;
+ int i, num_blkaddr[BLK_COUNT] = { 0 };
+ struct rvu_block *block;
+ int blkaddr;
+ u16 start_slot;
+
+ if (!is_blktype_attached(pfvf, blktype))
+ return -ENODEV;
+
+ /* Get all the block addresses from which LFs are attached to
+ * the given pcifunc in num_blkaddr[].
+ */
+ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+ block = &rvu->hw->block[blkaddr];
+ if (block->type != blktype)
+ continue;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ continue;
+
+ numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+ if (numlfs) {
+ total_lfs += numlfs;
+ num_blkaddr[nr_blocks] = blkaddr;
+ nr_blocks++;
+ }
+ }
+
+ if (global_slot >= total_lfs)
+ return -ENODEV;
+
+	/* Based on the given global slot number, retrieve the correct
+	 * block address out of all attached block addresses and the
+	 * slot number within that block.
+	 */
+ total_lfs = 0;
+ blkaddr = -ENODEV;
+ for (i = 0; i < nr_blocks; i++) {
+ numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+ total_lfs += numlfs;
+ if (global_slot < total_lfs) {
+ blkaddr = num_blkaddr[i];
+ start_slot = total_lfs - numlfs;
+ *slot_in_block = global_slot - start_slot;
+ break;
+ }
+ }
+
+ return blkaddr;
+}
+
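A short usage sketch of the helper above, mirroring the inline-IPsec mbox handler later in this patch; the global slot value is illustrative:

	u16 slot_in_block;
	int blkaddr;

	/* With two LFs attached from CPT0, global slot 3 resolves to
	 * CPT1 slot 1; with four or more from CPT0, to CPT0 slot 3.
	 */
	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
					    3, &slot_in_block);
	if (blkaddr < 0)
		return CPT_AF_ERR_LF_INVALID;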
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -2345,7 +2405,6 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
int devid;
if (mw->mbox_wq) {
- flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
@@ -2473,7 +2532,8 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
else if ((block->addr == BLKADDR_CPT0) ||
(block->addr == BLKADDR_CPT1))
- rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
@@ -2671,6 +2731,8 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
+ rvu_cpt_unregister_interrupts(rvu);
+
/* Disable the Mbox interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
@@ -2880,6 +2942,11 @@ static int rvu_register_interrupts(struct rvu *rvu)
goto fail;
}
rvu->irq_allocated[offset] = true;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
return 0;
fail:
@@ -2890,7 +2957,6 @@ fail:
static void rvu_flr_wq_destroy(struct rvu *rvu)
{
if (rvu->flr_wq) {
- flush_workqueue(rvu->flr_wq);
destroy_workqueue(rvu->flr_wq);
rvu->flr_wq = NULL;
}
@@ -3186,6 +3252,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&rvu->rswitch.switch_lock);
+ if (rvu->fwdata)
+ ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
return 0;
err_dl:
rvu_unregister_dl(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 1d9411232f1d..66e45d733824 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -101,6 +101,7 @@ struct rvu_block {
u64 msixcfg_reg;
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
};
struct nix_mcast {
@@ -220,6 +221,7 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
@@ -237,6 +239,7 @@ struct rvu_pfvf {
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+ int intf_mode;
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
@@ -394,7 +397,9 @@ struct rvu_fwdata {
u64 mcam_addr;
u64 mcam_sz;
u64 msixtr_base;
-#define FWDATA_RESERVED_MEM 1023
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 5
#define CGX_LMACS_MAX 4
@@ -656,6 +661,8 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block);
/* RVU HW reg validation */
enum regmap_block {
@@ -794,6 +801,7 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
@@ -805,7 +813,11 @@ bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
/* CPT APIs */
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
@@ -827,4 +839,7 @@ void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 81e8ea9ee30e..2ca182a4ce82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -324,7 +324,6 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
- flush_workqueue(rvu->cgx_evh_wq);
destroy_workqueue(rvu->cgx_evh_wq);
rvu->cgx_evh_wq = NULL;
}
@@ -411,7 +410,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
@@ -694,7 +693,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -711,13 +712,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
- cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, true);
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
*/
if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
return -EINVAL;
+	/* This flag is required to clean up the CGX config if the app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
return 0;
}
@@ -725,6 +729,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ return -EPERM;
+
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 46a41cfff575..7dbbc115cde4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -334,8 +334,8 @@ int rvu_set_channels_base(struct rvu *rvu)
/* Out of 4096 channels start CPT from 2048 so
* that MSB for CPT channels is always set
*/
- if (cpt_chan_base <= 0x800) {
- hw->cpt_chan_base = 0x800;
+ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+ hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
} else {
dev_err(rvu->dev,
"CPT channels could not fit in the range 2048-4095\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 1f90a7403392..45357deecabb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -37,6 +37,236 @@
(_rsp)->free_sts_##etype = free_sts; \
})
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg0, reg1, reg2;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ if (!is_rvu_otx2(rvu)) {
+ reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+ reg0, reg1, reg2);
+ } else {
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ if (!is_rvu_otx2(rvu))
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ char irq_name[16];
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs, ret = 0;
+ char irq_name[16];
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
static int get_cpt_pf_num(struct rvu *rvu)
{
int i, domain_nr, cpt_pf_num = -1;
@@ -147,9 +377,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
- /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- val = (u64)req->nix_pf_func << 48 |
- (u64)req->sso_pf_func << 32;
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
}
@@ -159,7 +393,7 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
u16 pcifunc = req->hdr.pcifunc;
- int num_lfs, cptlf, slot;
+ int num_lfs, cptlf, slot, err;
struct rvu_block *block;
block = &rvu->hw->block[blkaddr];
@@ -173,10 +407,15 @@ static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
- /* Reset CPT LF group and priority */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0);
- /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 0x0);
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
}
return 0;
@@ -197,6 +436,141 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
return ret;
}
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 sso_pf_func = req->sso_pf_func;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(16))) {
+		/* The IPsec inline outbound path is already enabled for this
+		 * CPT LF. The HRM states that the inline inbound and outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+		 */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+ /* Enable CPT LF for IPsec inline inbound operations */
+ if (req->enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)req->nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+ if (req->sso_pf_func_ovrd)
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ /* Configure the X2P Link register with the cpt base channel number and
+ * range of channels it should propagate to X2P
+ */
+ if (!is_rvu_otx2(rvu)) {
+ val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+ val |= rvu->hw->cpt_chan_base;
+
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 nix_pf_func = req->nix_pf_func;
+ int nix_blkaddr;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(9))) {
+		/* The IPsec inline inbound path is already enabled for this
+		 * CPT LF. The HRM states that the inline inbound and outbound
+		 * paths must not be enabled at the same time for a given CPT LF.
+		 */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Enable CPT LF for IPsec inline outbound operations */
+ if (req->enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+ nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ default:
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
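For context, a sketch of what the requester's side of this handler could look like; otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(), otx2_sync_mbox_msg() and the pf mbox plumbing are assumed names, not part of this patch:

	struct cpt_inline_ipsec_cfg_msg *req;

	req = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox);
	if (!req)
		return -ENOMEM;

	req->enable	 = 1;
	req->slot	 = 0;			/* global CPT LF slot */
	req->dir	 = CPT_INLINE_OUTBOUND;
	req->nix_pf_func = nix_pf_func;		/* required for the outbound path */

	return otx2_sync_mbox_msg(&pf->mbox);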
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
u64 offset = req->reg_offset;
@@ -421,6 +795,58 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+}
+
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
@@ -485,14 +911,12 @@ static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
}
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
- int blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
- if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
- return -EINVAL;
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
@@ -509,3 +933,154 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
return 0;
}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return 0;
+}
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 274d3abe30eb..70bacd38a6d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1510,13 +1510,6 @@ int rvu_register_dl(struct rvu *rvu)
return -ENOMEM;
}
- err = devlink_register(dl);
- if (err) {
- dev_err(rvu->dev, "devlink register failed with error %d\n", err);
- devlink_free(dl);
- return err;
- }
-
rvu_dl = devlink_priv(dl);
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
@@ -1537,13 +1530,11 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl_health:
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -1553,12 +1544,9 @@ void rvu_unregister_dl(struct rvu *rvu)
struct rvu_devlink *rvu_dl = rvu->rvu_dl;
struct devlink *dl = rvu_dl->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
devlink_params_unregister(dl, rvu_af_dl_params,
ARRAY_SIZE(rvu_af_dl_params));
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 9ef4e942e31e..7761dcf17b91 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -28,6 +28,7 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
+static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -1061,10 +1062,68 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+	/* Make a copy of the original context & mask, which are required
+	 * for resubmission.
+	 */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+	/* The context mask (cq_mask) holds the mask value of the fields
+	 * that were changed by the AQ WRITE operation, for example:
+	 *   cq.drop = 0xa;
+	 *   cq_mask.drop = 0xff;
+	 * The logic below ANDs cq with cq_mask so that the non-updated
+	 * fields are masked out of both the request and the response
+	 * before comparison.
+	 */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
+
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
struct nix_hw *nix_hw;
+ int err, retries = 5;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
@@ -1075,7 +1134,24 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata: 'AQ modification to CQ could be discarded on heavy traffic'.
+ * As a workaround, perform a CQ context read after each AQ write. If
+ * the read shows the write did not take effect, issue the write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
}
static const char *nix_get_ctx_name(int ctype)
@@ -4436,10 +4512,17 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
@@ -4476,9 +4559,33 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* Disabling CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -4579,6 +4686,119 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+ u64 val;
+
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ val = 0;
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val |= BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF);
+ }
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_lf_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
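The retry path added to rvu_nix_aq_enq_inst() above is a plain write-verify-retry loop around the AQ. Reduced to its shape (the function pointers and error codes here are illustrative, not the driver's API):

/* Retry a hardware write until readback confirms it, at most 'retries' times */
static int write_verified(int (*write)(void), int (*verify)(void), int retries)
{
	int err;

	do {
		err = write();
		if (err)
			return err; /* hard failure, no point retrying */
		err = verify(); /* 0 on match, -EAGAIN on a lost write */
	} while (err == -EAGAIN && retries--);

	return err;
}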
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 5efb4174e82d..bb6b42bbefa4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -3167,6 +3167,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
+
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* Use the default pkind to disable EDSA/HiGig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+ /* Rx pkind set request valid only for CGX mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
struct msg_req *req,
struct npc_mcam_read_base_rule_rsp *rsp)
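npc_set_var_len_offset_pkind() above derives the KPU shift count from where the set bits sit in the 8-bit offset mask, using the kernel's __ffs()/__fls() helpers from <linux/bitops.h>. Worked through for an illustrative mask value:

unsigned long mask = 0xf0; /* example: offset bits live in the high nibble */

unsigned int rshift = __ffs(mask);     /* 4: index of the first set bit, right-shift count */
unsigned int lshift = 8 - __fls(mask); /* 8 - 7 = 1: left-shift count from the top of the byte */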
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 21f1ed4e222f..22cd751613cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -236,6 +236,8 @@
#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -525,6 +527,7 @@
#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
@@ -542,6 +545,7 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
#define NPC_AF_BLK_RST (0x00040)
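Both new indexed macros follow the file's existing convention for register arrays: a base CSR offset OR'd with the index shifted left by 3, i.e. one 64-bit register every 8 bytes. For example:

/* CPT_AF_CTX_CAM_DATA(a) = 0x49800 + a * 8 */
u64 addr0 = CPT_AF_CTX_CAM_DATA(0); /* 0x49800 */
u64 addr5 = CPT_AF_CTX_CAM_DATA(5); /* 0x49828 */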
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 77ac96693f04..edc9367b1b95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -62,6 +62,24 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+
/* NPA Admin function Interrupt Vector Enumeration */
enum npa_af_int_vec_e {
NPA_AF_INT_VEC_RVU = 0x0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b92c267628b8..0048b5946712 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -3,11 +3,11 @@
# Makefile for Marvell's RVU Ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 95f21dfdba48..fd4f083c699e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 78df173e6df2..66da31f30d3e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -188,7 +188,7 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
- ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
+ eth_hw_addr_set(netdev, rsp->mac_addr);
mutex_unlock(&pfvf->mbox.lock);
return 0;
@@ -203,7 +203,7 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
/* update dmac field in vlan offload rule */
if (netif_running(netdev) &&
pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
@@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- req->maxlen = pfvf->max_frs;
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
u64 schq, parent;
u64 dwrr_val;
- dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
@@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
- | OTX2_MIN_MTU;
-
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
req->num_regs++;
@@ -718,7 +716,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
@@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -835,17 +833,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
- err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
- TSO_HEADER_SIZE);
- if (err)
- return err;
+ if (qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+ }
sq->sqe_base = sq->sqe->base;
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
if (!sq->sg)
return -ENOMEM;
- if (pfvf->ptp) {
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
@@ -871,20 +871,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
+ int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
- int err, pool_id;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
+ non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
if (qidx < pfvf->hw.rx_queues) {
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- } else {
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ } else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ cq->cq_type = CQ_XDP;
+ cq->cint_idx = qidx - non_xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
}
cq->cqe_size = pfvf->qset.xqe_size;
@@ -991,7 +998,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize TX queues */
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -1006,6 +1013,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
return err;
}
+ pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
+ NIX_LF_CQ_OP_STATUS);
+
/* Initialize work queue for receive buffer refill */
pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
sizeof(struct refill_work), GFP_KERNEL);
@@ -1035,7 +1045,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
- nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1073,7 +1083,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@@ -1285,7 +1295,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1305,7 +1315,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;
/* Allocate pointers and free them to aura/pool */
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index a51ecd771d07..61e52812983f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -171,6 +171,8 @@ struct otx2_hw {
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
+ u16 xdp_queues;
+ u16 tot_tx_queues;
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
@@ -223,6 +225,7 @@ struct otx2_hw {
#define HW_TSO 0
#define CN10K_MBOX 1
#define CN10K_LMTST 2
+#define CN10K_RPM 3
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -263,6 +266,12 @@ struct otx2_ptp {
struct cyclecounter cycle_counter;
struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -317,7 +326,7 @@ struct otx2_nic {
struct net_device *netdev;
struct dev_hw_ops *hw_ops;
void *iommu_domain;
- u16 max_frs;
+ u16 tx_max_pktlen;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
@@ -336,7 +345,9 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
+ u64 *cq_op_addr;
+ struct bpf_prog *xdp_prog;
struct otx2_qset qset;
struct otx2_hw hw;
struct pci_dev *pdev;
@@ -452,6 +463,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
if (!is_dev_otx2(pfvf->pdev)) {
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
+ __set_bit(CN10K_RPM, &hw->cap_flag);
}
}
@@ -825,6 +837,9 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
@@ -845,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 7ac3ef2fa06a..777a27047c8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -108,13 +108,6 @@ int otx2_register_dl(struct otx2_nic *pfvf)
return -ENOMEM;
}
- err = devlink_register(dl);
- if (err) {
- dev_err(pfvf->dev, "devlink register failed with error %d\n", err);
- devlink_free(dl);
- return err;
- }
-
otx2_dl = devlink_priv(dl);
otx2_dl->dl = dl;
otx2_dl->pfvf = pfvf;
@@ -128,12 +121,10 @@ int otx2_register_dl(struct otx2_nic *pfvf)
goto err_dl;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl:
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -141,16 +132,10 @@ err_dl:
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
struct otx2_devlink *otx2_dl = pfvf->dl;
- struct devlink *dl;
-
- if (!otx2_dl || !otx2_dl->dl)
- return;
-
- dl = otx2_dl->dl;
+ struct devlink *dl = otx2_dl->dl;
+ devlink_unregister(dl);
devlink_params_unregister(dl, otx2_dl_params,
ARRAY_SIZE(otx2_dl_params));
-
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index dbfa3bc39e34..b0f57bda7e27 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -121,14 +121,16 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
otx2_get_qset_strings(pfvf, &data, 0);
- for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_rxstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
- for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_txstat%d: ", stats);
- data += ETH_GSTRING_LEN;
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
}
strcpy(data, "reset_count");
@@ -205,11 +207,15 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
[otx2_drv_stats[stat].index]);
otx2_get_qset_stats(pfvf, stats, &data);
- otx2_update_lmac_stats(pfvf);
- for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_rx_stats[stat];
- for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_tx_stats[stat];
+
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ }
+
*(data++) = pfvf->reset_count;
fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
@@ -242,18 +248,19 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- int qstats_count;
+ int qstats_count, mac_stats = 0;
if (sset != ETH_SS_STATS)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
- + 1;
+ mac_stats + OTX2_FEC_STATS_CNT + 1;
}
/* Get no of queues device supports and current queue count */
@@ -1340,6 +1347,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_link_ksettings = otx2vf_get_link_ksettings,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
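The same CN10K_RPM gate has to appear in all three ethtool callbacks — otx2_get_strings(), otx2_get_ethtool_stats() and otx2_get_sset_count() — because ethtool pairs names with values purely by position; if the count and the strings disagree, 'ethtool -S' misaligns every stat that follows. One way to keep the three sites in sync is a shared predicate (a hypothetical helper, sketched here):

/* Hypothetical shared predicate for the three ethtool stats callbacks */
static bool otx2_has_cgx_mac_stats(struct otx2_nic *pfvf)
{
	/* CN10K exposes MAC counters via RPM rather than CGX */
	return !test_bit(CN10K_RPM, &pfvf->hw.cap_flag);
}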
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 53df7fff92c4..1e0d0c9c1dac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *pf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
+ if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
if (if_up)
otx2_stop(netdev);
@@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
}
/* SQ */
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
@@ -1304,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
* NIX transfers entire data using 6 segments/buffers and writes
* a CQE_RX descriptor with those segment addresses. First segment
* has additional data prepended to packet. Also software omits a
- * headroom of 128 bytes and sizeof(struct skb_shared_info) in
- * each segment. Hence the total size of memory needed
- * to receive a packet with 'mtu' is:
+ * headroom of 128 bytes in each segment. Hence the total size of
+ * memory needed to receive a packet with 'mtu' is:
* frame size = mtu + additional data;
- * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
+ * memory = frame_size + headroom * 6;
* each receive buffer size = memory / 6;
*/
frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
- total_size = frame_size + (OTX2_HEAD_ROOM +
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
rbuf_size = total_size / 6;
return ALIGN(rbuf_size, 2048);
@@ -1332,10 +1338,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
- hw->sqpool_cnt = hw->tx_queues;
+ hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
@@ -1493,6 +1500,44 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_unlock(&mbox->lock);
}
+static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+{
+ struct net_device *netdev = pf->netdev;
+ struct nix_rx_mode *req;
+ bool promisc = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Write unicast address to mcam entries or del from mcam */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (promisc)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
int otx2_open(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1503,7 +1548,7 @@ int otx2_open(struct net_device *netdev)
netif_carrier_off(netdev);
- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
@@ -1523,7 +1568,7 @@ int otx2_open(struct net_device *netdev)
if (!qset->cq)
goto err_free_mem;
- qset->sq = kcalloc(pf->hw.tx_queues,
+ qset->sq = kcalloc(pf->hw.tot_tx_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
@@ -1544,11 +1589,20 @@ int otx2_open(struct net_device *netdev)
/* RQ0 & SQ0 are mapped to CINT0 and so on..
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
+ * 'cq_ids[2]' points to XDP's CQ.
*/
cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ if (pf->xdp_prog)
+ cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.tx_queues) :
+ CINT_INVALID_CQ;
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
@@ -1646,6 +1700,8 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_tx_stop_queues;
+ otx2_do_set_rx_mode(pf);
+
return 0;
err_tx_stop_queues:
@@ -1750,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
- (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1791,43 +1847,11 @@ static void otx2_set_rx_mode(struct net_device *netdev)
queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
-static void otx2_do_set_rx_mode(struct work_struct *work)
+static void otx2_rx_mode_wrk_handler(struct work_struct *work)
{
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
- struct net_device *netdev = pf->netdev;
- struct nix_rx_mode *req;
- bool promisc = false;
-
- if (!(netdev->flags & IFF_UP))
- return;
-
- if ((netdev->flags & IFF_PROMISC) ||
- (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
- promisc = true;
- }
-
- /* Write unicast address to mcam entries or del from mcam */
- if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
- __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
-
- mutex_lock(&pf->mbox.lock);
- req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
- if (!req) {
- mutex_unlock(&pf->mbox.lock);
- return;
- }
-
- req->mode = NIX_RX_MODE_UCAST;
-
- if (promisc)
- req->mode |= NIX_RX_MODE_PROMISC;
- if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
- req->mode |= NIX_RX_MODE_ALLMULTI;
- req->mode |= NIX_RX_MODE_USE_MCE;
-
- otx2_sync_mbox_msg(&pf->mbox);
- mutex_unlock(&pf->mbox.lock);
+ otx2_do_set_rx_mode(pf);
}
static int otx2_set_features(struct net_device *netdev,
@@ -1967,7 +1991,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -2023,8 +2047,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(otx2_config_hwtstamp);
-static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config *cfg = &pfvf->tstamp;
@@ -2039,6 +2064,7 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_ioctl);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
@@ -2281,6 +2307,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+ int qidx)
+{
+ struct page *page;
+ u64 dma_addr;
+ int err = 0;
+
+ dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data), xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pf->dev, dma_addr))
+ return -ENOMEM;
+
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ if (!err) {
+ otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+ page = virt_to_page(xdpf->data);
+ put_page(page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = smp_processor_id();
+ struct otx2_snd_queue *sq;
+ int drops = 0, i;
+
+ if (!netif_running(netdev))
+ return -ENETDOWN;
+
+ qidx += pf->hw.tx_queues;
+ sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+ /* Abort xmit if the XDP queue is not set up */
+ if (unlikely(!sq))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+ if (err)
+ drops++;
+ }
+ return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+ struct net_device *dev = pf->netdev;
+ bool if_up = netif_running(pf->netdev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MAX_XDP_MTU) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (if_up)
+ otx2_stop(pf->netdev);
+
+ old_prog = xchg(&pf->xdp_prog, prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (pf->xdp_prog)
+ bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+
+ /* The network stack and XDP share the same Rx queues.
+ * Use separate Tx queues for XDP and the network stack.
+ */
+ if (pf->xdp_prog)
+ pf->hw.xdp_queues = pf->hw.rx_queues;
+ else
+ pf->hw.xdp_queues = 0;
+
+ pf->hw.tot_tx_queues += pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+
+ return 0;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return otx2_xdp_setup(pf, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
int req_perm)
{
@@ -2348,6 +2479,8 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_bpf = otx2_xdp,
+ .ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
@@ -2358,7 +2491,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
if (!pf->otx2_wq)
return -ENOMEM;
- INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
INIT_WORK(&pf->reset_task, otx2_reset_task);
return 0;
}
@@ -2489,6 +2622,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
num_vec = pci_msix_vec_count(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index ec9e49985c2c..0ef68fdd1f26 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -27,6 +27,23 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
+static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_SET_THRESH;
+ req->thresh = thresh;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
static u64 ptp_cc_read(const struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
@@ -55,6 +72,33 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
return rsp->clk;
}
+static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_TSTMP;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -102,9 +146,73 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
return 0;
}
+static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void otx2_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstmp = ptp_tstmp_read(ptp);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (tstmp != ptp->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
+ ptp_clock_event(ptp->ptp_clock, &event);
+ ptp->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (ptp->thresh != new_thresh) {
+ mutex_lock(&ptp->nic->mbox.lock);
+ ptp_set_thresh(ptp, new_thresh);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ ptp->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ int pin;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&ptp->extts_work);
+ return 0;
+ default:
+ break;
+ }
return -EOPNOTSUPP;
}
@@ -115,6 +223,11 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
struct ptp_req *req;
int err;
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->ptp = NULL;
+ return 0;
+ }
+
mutex_lock(&pfvf->mbox.lock);
/* check if PTP block is available */
req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
@@ -149,20 +262,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
ktime_to_ns(ktime_get_real()));
+ snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
+ ptp_ptr->extts_config.index = 0;
+ ptp_ptr->extts_config.func = PTP_PF_NONE;
+
ptp_ptr->ptp_info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
- .n_ext_ts = 0,
- .n_pins = 0,
+ .n_ext_ts = 1,
+ .n_pins = 1,
.pps = 0,
+ .pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
+ .verify = otx2_ptp_verify_pin,
};
+ INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
+
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
err = ptp_ptr->ptp_clock ?
@@ -176,6 +297,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
error:
return err;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_init);
void otx2_ptp_destroy(struct otx2_nic *pfvf)
{
@@ -188,6 +310,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
kfree(ptp);
pfvf->ptp = NULL;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_destroy);
int otx2_ptp_clock_index(struct otx2_nic *pfvf)
{
@@ -196,6 +319,7 @@ int otx2_ptp_clock_index(struct otx2_nic *pfvf)
return ptp_clock_index(pfvf->ptp->ptp_clock);
}
+EXPORT_SYMBOL_GPL(otx2_ptp_clock_index);
int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
{
@@ -206,3 +330,8 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
return 0;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_tstamp2time);
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver");
+MODULE_LICENSE("GPL v2");
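With n_ext_ts = 1 and the TSTAMP pin published, the external timestamps surfaced by the polled extts_work can be consumed through the standard PTP character device. A minimal user-space reader, assuming the clock enumerates as /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR); /* assumed clock index */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0; /* the single TSTAMP channel */
	req.flags = PTP_ENABLE_FEATURE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* Each read returns one timestamp event from the kernel queue */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts: %lld.%09u\n", (long long)ev.t.sec, ev.t.nsec);

	return 0;
}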
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f42b1d4e0c67..0cc6353254bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -8,6 +8,8 @@
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -17,6 +19,35 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq);
+
+static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq)
+{
+ u64 incr = (u64)(cq->cq_idx) << 32;
+ u64 status;
+
+ status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+ if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+ dev_err(pfvf->dev, "CQ stopped due to error\n");
+ return -EINVAL;
+ }
+
+ cq->cq_tail = status & 0xFFFFF;
+ cq->cq_head = (status >> 20) & 0xFFFFF;
+ if (cq->cq_tail < cq->cq_head)
+ cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+ cq->cq_tail;
+ else
+ cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+ return 0;
+}
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
@@ -73,6 +104,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
sg->num_segs = 0;
}
+static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sg_list *sg;
+ struct page *page;
+ u64 pa;
+
+ sg = &sq->sg[snd_comp->sqe_id];
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
+ sg->size[0], DMA_TO_DEVICE);
+ page = virt_to_page(phys_to_virt(pa));
+ put_page(page);
+}
+
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
struct otx2_snd_queue *sq,
@@ -132,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len, struct nix_rx_parse_s *parse)
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
{
struct page *page;
int off = 0;
@@ -154,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
}
page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page) + off, len - off, pfvf->rbsize);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+ return true;
+ }
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
+ /* If more than MAX_SKB_FRAGS fragments are received,
+ * give the buffer pointers back to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
}
static void otx2_set_rxhash(struct otx2_nic *pfvf,
@@ -285,6 +346,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
return;
}
+ if (pfvf->xdp_prog)
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+ return;
+
skb = napi_get_frags(napi);
if (unlikely(!skb))
return;
@@ -296,9 +361,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
seg_addr = &sg->seg_addr;
seg_size = (void *)sg;
for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
- otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
- parse);
- cq->pool_ptrs++;
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
}
start += sizeof(*sg);
}
@@ -318,7 +383,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
!cqe->sg.seg_addr) {
@@ -334,17 +406,13 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (unlikely(!cq->pool_ptrs))
- return 0;
- /* Refill pool with new buffers */
- pfvf->hw_ops->refill_pool_ptrs(pfvf, cq);
-
return processed_cqe;
}
@@ -364,22 +432,36 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
- int tx_pkts = 0, tx_bytes = 0;
+ int tx_pkts = 0, tx_bytes = 0, qidx;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
if (!processed_cqe)
return 0;
break;
}
- otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
-
+ if (cq->cq_type == CQ_XDP) {
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
+ cqe);
+ } else {
+ otx2_snd_pkt_handler(pfvf, cq,
+ &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+ }
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
@@ -402,6 +484,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
+ struct otx2_cq_queue *rx_cq = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
struct otx2_cq_queue *cq;
@@ -412,17 +495,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ for (i = 0; i < CQS_PER_CINT; i++) {
cq_idx = cq_poll->cq_ids[i];
if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
- /* If the RQ refill WQ task is running, skip napi
- * scheduler for this queue.
- */
- if (cq->refill_task_sched)
- continue;
+ rx_cq = cq;
workdone += otx2_rx_napi_handler(pfvf, napi,
cq, budget);
} else {
@@ -430,6 +509,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
}
+ if (rx_cq && rx_cq->pool_ptrs)
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -936,10 +1017,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
int processed_cqe = 0;
u64 iova, pa;
- while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
- if (!cqe->sg.subdc)
- continue;
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_unreg(&cq->xdp_rxq);
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
@@ -965,7 +1055,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sq = &pfvf->qset.sq[cq->cint_idx];
- while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
@@ -973,7 +1072,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -1001,3 +1099,116 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 *iova = NULL;
+
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->seg1_size = len;
+ iova = (void *)sg + sizeof(*sg);
+ *iova = dma_addr;
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ sq->sg[sq->head].dma_addr[0] = dma_addr;
+ sq->sg[sq->head].size[0] = len;
+ sq->sg[sq->head].num_segs = 1;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset, free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ if (free_sqe < sq->sqe_thresh)
+ return false;
+
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
+
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq)
+{
+ unsigned char *hard_start, *data;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+ u64 iova, pa;
+ u32 act;
+ int err;
+
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_page(phys_to_virt(pa));
+
+ xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
+
+ data = (unsigned char *)phys_to_virt(pa);
+ hard_start = page_address(page);
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ cqe->sg.seg_size, false);
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ qidx += pfvf->hw.tx_queues;
+ cq->pool_ptrs++;
+ return otx2_xdp_sq_append_pkt(pfvf, iova,
+ cqe->sg.seg_size, qidx);
+ case XDP_REDIRECT:
+ cq->pool_ptrs++;
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ if (!err)
+ return true;
+ put_page(page);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ break;
+ case XDP_DROP:
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ cq->pool_ptrs++;
+ return true;
+ }
+ return false;
+}
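otx2_nix_cq_op_status(), added near the top of this file, decodes the 20-bit head and tail indices from the atomic OP_STATUS readback and converts them into a pending-entry count with ordinary ring arithmetic. The computation in isolation:

/* Pending entries in a ring of 'cnt' slots, given head and tail indices */
static u32 ring_pending(u32 head, u32 tail, u32 cnt)
{
	if (tail < head) /* tail wrapped past the end of the ring */
		return (cnt - head) + tail;
	return tail - head;
}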
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 3ff1ad79c001..f1a04cf9210c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -11,6 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <linux/if_vlan.h>
+#include <net/xdp.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -25,6 +26,8 @@
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
+#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN)
+
/* Rx buffer size should be in multiples of 128bytes */
#define RCV_FRAG_LEN1(x) \
((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
@@ -36,9 +39,7 @@
#define RCV_FRAG_LEN(x) \
((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
-#define DMA_BUFFER_LEN(x) \
- ((x) - OTX2_HEAD_ROOM - \
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
* is equal to this value.
@@ -56,6 +57,9 @@
*/
#define CQ_QCOUNT_DEFAULT 1
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
struct queue_stats {
u64 bytes;
u64 pkts;
@@ -96,7 +100,8 @@ struct otx2_snd_queue {
enum cq_type {
CQ_RX,
CQ_TX,
- CQS_PER_CINT = 2, /* RQ + SQ */
+ CQ_XDP,
+ CQS_PER_CINT = 3, /* RQ + SQ + XDP */
};
struct otx2_cq_poll {
@@ -122,9 +127,12 @@ struct otx2_cq_queue {
u16 pool_ptrs;
u32 cqe_cnt;
u32 cq_head;
+ u32 cq_tail;
+ u32 pend_cqe;
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct otx2_qset {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 03b4ec630432..e6cb8cd0787d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -8,9 +8,11 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "otx2_ptp.h"
#include "cn10k.h"
#define DRV_NAME "rvu_nicvf"
@@ -277,7 +279,6 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
struct mbox *mbox = &vf->mbox;
if (vf->mbox_wq) {
- flush_workqueue(vf->mbox_wq);
destroy_workqueue(vf->mbox_wq);
vf->mbox_wq = NULL;
}
@@ -500,6 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -583,6 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -640,6 +643,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(vf);
+
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 68b442eb6d69..06279cd6da67 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -345,8 +345,6 @@ static struct prestera_trap prestera_trap_items_arr[] = {
},
};
-static void prestera_devlink_traps_fini(struct prestera_switch *sw);
-
static int prestera_drop_counter_get(struct devlink *devlink,
const struct devlink_trap *trap,
u64 *p_drops);
@@ -381,8 +379,6 @@ static int prestera_trap_action_set(struct devlink *devlink,
enum devlink_trap_action action,
struct netlink_ext_ack *extack);
-static int prestera_devlink_traps_register(struct prestera_switch *sw);
-
static const struct devlink_ops prestera_dl_ops = {
.info_get = prestera_dl_info_get,
.trap_init = prestera_trap_init,
@@ -407,38 +403,18 @@ void prestera_devlink_free(struct prestera_switch *sw)
devlink_free(dl);
}
-int prestera_devlink_register(struct prestera_switch *sw)
+void prestera_devlink_register(struct prestera_switch *sw)
{
struct devlink *dl = priv_to_devlink(sw);
- int err;
-
- err = devlink_register(dl);
- if (err) {
- dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
- return err;
- }
- err = prestera_devlink_traps_register(sw);
- if (err) {
- devlink_unregister(dl);
- dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n",
- err);
- return err;
- }
-
- return 0;
+ devlink_register(dl);
}
void prestera_devlink_unregister(struct prestera_switch *sw)
{
- struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
- prestera_devlink_traps_fini(sw);
devlink_unregister(dl);
-
- kfree(trap_data->trap_items_arr);
- kfree(trap_data);
}
int prestera_devlink_port_register(struct prestera_port *port)
@@ -486,7 +462,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
return &port->dl_port;
}
-static int prestera_devlink_traps_register(struct prestera_switch *sw)
+int prestera_devlink_traps_register(struct prestera_switch *sw)
{
const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr);
@@ -625,8 +601,9 @@ static int prestera_drop_counter_get(struct devlink *devlink,
cpu_code_type, p_drops);
}
-static void prestera_devlink_traps_fini(struct prestera_switch *sw)
+void prestera_devlink_traps_unregister(struct prestera_switch *sw)
{
+ struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
const struct devlink_trap *trap;
int i;
@@ -638,4 +615,6 @@ static void prestera_devlink_traps_fini(struct prestera_switch *sw)
devlink_trap_groups_unregister(dl, prestera_trap_groups_arr,
ARRAY_SIZE(prestera_trap_groups_arr));
+ kfree(trap_data->trap_items_arr);
+ kfree(trap_data);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index cc34c3db13a2..b322295bad3a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -9,7 +9,7 @@
struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev);
void prestera_devlink_free(struct prestera_switch *sw);
-int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_register(struct prestera_switch *sw);
void prestera_devlink_unregister(struct prestera_switch *sw);
int prestera_devlink_port_register(struct prestera_port *port);
@@ -22,5 +22,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
void prestera_devlink_trap_report(struct prestera_port *port,
struct sk_buff *skb, u8 cpu_code);
+int prestera_devlink_traps_register(struct prestera_switch *sw);
+void prestera_devlink_traps_unregister(struct prestera_switch *sw);
#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 44c670807fb3..d0d5a229d19d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -137,7 +137,7 @@ static int prestera_port_set_mac_address(struct net_device *dev, void *p)
if (err)
return err;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -338,11 +338,14 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
+ eth_hw_addr_gen(dev, sw->base_mac, port->fp_id);
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
*/
- memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
- dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+ if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) {
+ dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id);
+ dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1);
+ }
err = prestera_hw_port_mac_set(port, dev->dev_addr);
if (err) {
@@ -851,7 +854,7 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_span_init;
- err = prestera_devlink_register(sw);
+ err = prestera_devlink_traps_register(sw);
if (err)
goto err_dl_register;
@@ -863,12 +866,13 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_ports_create;
+ prestera_devlink_register(sw);
return 0;
err_ports_create:
prestera_lag_fini(sw);
err_lag_init:
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
err_dl_register:
prestera_span_fini(sw);
err_span_init:
@@ -888,9 +892,10 @@ err_swdev_register:
static void prestera_switch_fini(struct prestera_switch *sw)
{
+ prestera_devlink_unregister(sw);
prestera_destroy_ports(sw);
prestera_lag_fini(sw);
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
prestera_span_fini(sw);
prestera_acl_fini(sw);
prestera_event_handlers_unregister(sw);
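
The prestera changes track the tree-wide devlink rework: sub-objects (traps, ports) are set up first, devlink_register() is called only once the instance is fully initialized, and teardown mirrors that with devlink_unregister() first; devlink_register() returns void in this scheme. A sketch of the resulting ordering, with the drv_* port helpers as placeholders:

static int drv_switch_init(struct prestera_switch *sw)
{
	int err;

	err = prestera_devlink_traps_register(sw);	/* sub-objects first */
	if (err)
		return err;

	err = drv_create_ports(sw);			/* placeholder */
	if (err) {
		prestera_devlink_traps_unregister(sw);
		return err;
	}

	prestera_devlink_register(sw);			/* publish last */
	return 0;
}

static void drv_switch_fini(struct prestera_switch *sw)
{
	prestera_devlink_unregister(sw);		/* unpublish first */
	drv_destroy_ports(sw);				/* placeholder */
	prestera_devlink_traps_unregister(sw);
}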
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fab53c9b8380..6c02e1740609 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -389,7 +389,7 @@ static void inverse_every_nibble(unsigned char *mac_addr)
* Outputs
* return the calculated entry.
*/
-static u32 hash_function(unsigned char *mac_addr_orig)
+static u32 hash_function(const unsigned char *mac_addr_orig)
{
u32 hash_result;
u32 addr0;
@@ -434,7 +434,7 @@ static u32 hash_function(unsigned char *mac_addr_orig)
* -ENOSPC if table full
*/
static int add_del_hash_entry(struct pxa168_eth_private *pep,
- unsigned char *mac_addr,
+ const unsigned char *mac_addr,
u32 rd, u32 skip, int del)
{
struct addr_table_entry *entry, *start;
@@ -521,7 +521,7 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep,
*/
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
unsigned char *oaddr,
- unsigned char *addr)
+ const unsigned char *addr)
{
/* Delete old entry */
if (oaddr)
@@ -607,7 +607,7 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
mac_h = dev->dev_addr[0] << 24;
mac_h |= dev->dev_addr[1] << 16;
@@ -1434,11 +1434,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
- err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, dev);
if (err) {
+ u8 addr[ETH_ALEN];
+
/* try reading the mac address, if set by the bootloader */
- pxa168_eth_get_mac_address(dev, dev->dev_addr);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ pxa168_eth_get_mac_address(dev, addr);
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(dev, addr);
+ } else {
dev_info(&pdev->dev, "Using random mac address\n");
eth_hw_addr_random(dev);
}
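
The pxa168 conversion shows the pattern used across this series: netdev->dev_addr is no longer written directly, so drivers stage the address in a local buffer and commit it through eth_hw_addr_set(), or let of_get_ethdev_address() fill it from the device tree. A minimal sketch, assuming <linux/etherdevice.h> on a 5.15+ kernel; drv_read_mac_from_hw() is a hypothetical register-read helper:

static int drv_init_mac(struct net_device *dev, struct device_node *np)
{
	u8 addr[ETH_ALEN];

	if (!of_get_ethdev_address(np, dev))
		return 0;			/* DT provided the MAC */

	drv_read_mac_from_hw(dev, addr);	/* hypothetical helper */
	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(dev, addr);	/* instead of memcpy() */
	else
		eth_hw_addr_random(dev);
	return 0;
}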
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 051dd3fb5b03..0c864e5bf0a6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3459,7 +3459,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev)) {
memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
@@ -3810,6 +3810,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
{
struct skge_port *skge;
struct net_device *dev = alloc_etherdev(sizeof(*skge));
+ u8 addr[ETH_ALEN];
if (!dev)
return NULL;
@@ -3862,7 +3863,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
}
/* read the mac address */
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
return dev;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e9fc74e54b22..5abb55191e8e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3817,7 +3817,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
dev->dev_addr, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
@@ -4440,86 +4440,6 @@ static const struct ethtool_ops sky2_ethtool_ops = {
static struct dentry *sky2_debug;
-
-/*
- * Read and parse the first part of Vital Product Data
- */
-#define VPD_SIZE 128
-#define VPD_MAGIC 0x82
-
-static const struct vpd_tag {
- char tag[2];
- char *label;
-} vpd_tags[] = {
- { "PN", "Part Number" },
- { "EC", "Engineering Level" },
- { "MN", "Manufacturer" },
- { "SN", "Serial Number" },
- { "YA", "Asset Tag" },
- { "VL", "First Error Log Message" },
- { "VF", "Second Error Log Message" },
- { "VB", "Boot Agent ROM Configuration" },
- { "VE", "EFI UNDI Configuration" },
-};
-
-static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
-{
- size_t vpd_size;
- loff_t offs;
- u8 len;
- unsigned char *buf;
- u16 reg2;
-
- reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
- vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
-
- seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
- buf = kmalloc(vpd_size, GFP_KERNEL);
- if (!buf) {
- seq_puts(seq, "no memory!\n");
- return;
- }
-
- if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
- seq_puts(seq, "VPD read failed\n");
- goto out;
- }
-
- if (buf[0] != VPD_MAGIC) {
- seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
- goto out;
- }
- len = buf[1];
- if (len == 0 || len > vpd_size - 4) {
- seq_printf(seq, "Invalid id length: %d\n", len);
- goto out;
- }
-
- seq_printf(seq, "%.*s\n", len, buf + 3);
- offs = len + 3;
-
- while (offs < vpd_size - 4) {
- int i;
-
- if (!memcmp("RW", buf + offs, 2)) /* end marker */
- break;
- len = buf[offs + 2];
- if (offs + len + 3 >= vpd_size)
- break;
-
- for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
- if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
- seq_printf(seq, " %s: %.*s\n",
- vpd_tags[i].label, len, buf + offs + 3);
- break;
- }
- }
- offs += len + 3;
- }
-out:
- kfree(buf);
-}
-
static int sky2_debug_show(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
@@ -4529,9 +4449,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
unsigned idx, last;
int sop;
- sky2_show_vpd(seq, hw);
-
- seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
+ seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
sky2_read32(hw, B0_ISRC),
sky2_read32(hw, B0_IMSK),
sky2_read32(hw, B0_Y2_SP_ICR));
@@ -4802,10 +4720,13 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
* 1) from device tree data
* 2) from internal registers set by bootloader
*/
- ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr);
- if (ret)
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
- ETH_ALEN);
+ ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev);
+ if (ret) {
+ u8 addr[ETH_ALEN];
+
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ }
/* if the address is invalid, use a random value */
if (!is_valid_ether_addr(dev->dev_addr)) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 398c23cec815..75d67d1b5f6b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2588,7 +2588,7 @@ static int __init mtk_init(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
int ret;
- ret = of_get_mac_address(mac->of_node, dev->dev_addr);
+ ret = of_get_ethdev_address(mac->of_node, dev);
if (ret) {
/* If the mac address is invalid, use random mac address */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 1d5dd2015453..89ca7960b225 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -523,7 +523,7 @@ static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
static void mtk_star_set_mac_addr(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
- u8 *mac_addr = ndev->dev_addr;
+ const u8 *mac_addr = ndev->dev_addr;
unsigned int high, low;
high = mac_addr[0] << 8 | mac_addr[1] << 0;
@@ -1544,7 +1544,7 @@ static int mtk_star_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
+ ret = platform_get_ethdev_address(dev, ndev);
if (ret || !is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 8d751383530b..e10b7b04b894 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2480,7 +2480,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
return 0;
err_thread:
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
while (i--) {
@@ -2587,7 +2586,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
int i, port;
if (mlx4_is_master(dev)) {
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
for (i = 0; i < dev->num_slaves; i++) {
for (port = 1; port <= MLX4_MAX_PORTS; port++)
@@ -3009,7 +3007,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
return -EPERM;
}
- s_info->mac = mlx4_mac_to_u64(mac);
+ s_info->mac = ether_addr_to_u64(mac);
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
@@ -3195,7 +3193,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
- mlx4_u64_to_mac(mac, s_info->mac);
+ u64_to_ether_addr(s_info->mac, mac);
if (setting && !is_valid_ether_addr(mac)) {
mlx4_info(dev, "Illegal MAC with spoofchk\n");
return -EPERM;
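
mlx4's private MAC/u64 conversion helpers give way to the generic pair from <linux/etherdevice.h>; a small sketch of the round trip:

static void mac_u64_roundtrip(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 back[ETH_ALEN];
	u64 v;

	v = ether_addr_to_u64(mac);	/* v == 0x001122334455 */
	u64_to_ether_addr(v, back);	/* back[] equals mac[] again */
}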
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index f7053a74e6a8..4d4f9cf9facb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -314,7 +314,8 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
buf += PAGE_SIZE;
}
} else {
- err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
+ err = copy_to_user((void __user *)buf, init_ents,
+ array_size(entries, cqe_size)) ?
-EFAULT : 0;
}
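
The cq.c change swaps an open-coded entries * cqe_size for array_size() from <linux/overflow.h>, which saturates to SIZE_MAX on multiplication overflow so the copy fails cleanly instead of silently truncating. The shape, sketched:

static int copy_entries_to_user(void __user *ubuf, const void *kbuf,
				size_t entries, size_t entry_size)
{
	/* array_size() returns SIZE_MAX if entries * entry_size
	 * overflows, making copy_to_user() fail rather than copy a
	 * short buffer.
	 */
	if (copy_to_user(ubuf, kbuf, array_size(entries, entry_size)))
		return -EFAULT;
	return 0;
}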
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ef518b1040f7..66c8ae29bc7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -197,6 +197,8 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
/* xdp statistics */
"rx_xdp_drop",
+ "rx_xdp_redirect",
+ "rx_xdp_redirect_fail",
"rx_xdp_tx",
"rx_xdp_tx_full",
@@ -428,6 +430,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
data[index++] = priv->rx_ring[i]->bytes;
data[index++] = priv->rx_ring[i]->dropped;
data[index++] = priv->rx_ring[i]->xdp_drop;
+ data[index++] = priv->rx_ring[i]->xdp_redirect;
+ data[index++] = priv->rx_ring[i]->xdp_redirect_fail;
data[index++] = priv->rx_ring[i]->xdp_tx;
data[index++] = priv->rx_ring[i]->xdp_tx_full;
}
@@ -520,6 +524,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_drop", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect_fail", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx_full", i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 109472d6b61f..f1259bdb1a29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -237,7 +237,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
- flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8af7f2827322..3f6d5c384637 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -527,18 +527,17 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
return err;
}
-static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+static void mlx4_en_u64_to_mac(struct net_device *dev, u64 src_mac)
{
- int i;
- for (i = ETH_ALEN - 1; i >= 0; --i) {
- dst_mac[i] = src_mac & 0xff;
- src_mac >>= 8;
- }
- memset(&dst_mac[ETH_ALEN], 0, 2);
+ u8 addr[ETH_ALEN];
+
+ u64_to_ether_addr(src_mac, addr);
+ eth_hw_addr_set(dev, addr);
}
-static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv,
+ const unsigned char *addr,
int qpn, u64 *reg_id)
{
int err;
@@ -559,7 +558,7 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
- unsigned char *mac, int *qpn, u64 *reg_id)
+ const unsigned char *mac, int *qpn, u64 *reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -611,7 +610,8 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
- unsigned char *mac, int qpn, u64 reg_id)
+ const unsigned char *mac,
+ int qpn, u64 reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -644,7 +644,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
int index = 0;
int err = 0;
int *qpn = &priv->base_qpn;
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
@@ -683,7 +683,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
int qpn = priv->base_qpn;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
@@ -701,14 +701,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
- u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
+ u64 new_mac_u64 = ether_addr_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
- u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
+ u64 prev_mac_u64 = ether_addr_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -797,7 +797,7 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
if (err)
goto out;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
mlx4_en_update_user_mac(priv, new_mac);
out:
mutex_unlock(&mdev->state_lock);
@@ -1076,7 +1076,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
- mcast_addr = mlx4_mac_to_u64(mclist->addr);
+ mcast_addr = ether_addr_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -1169,7 +1169,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
found = true;
if (!found) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
@@ -1212,7 +1212,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
- mac = mlx4_mac_to_u64(ha->addr);
+ mac = ether_addr_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
@@ -1348,7 +1348,7 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
@@ -3267,7 +3267,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Set default MAC */
dev->addr_len = ETH_ALEN;
- mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ mlx4_en_u64_to_mac(dev, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0158b88bea5b..532997eba698 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -244,6 +244,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete = 0;
priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
+ priv->xdp_stats.rx_xdp_redirect = 0;
+ priv->xdp_stats.rx_xdp_redirect_fail = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -255,6 +257,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
+ priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect);
+ priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7f6d3b82c29b..650e6a1844ae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct bpf_prog *xdp_prog;
int cq_ring = cq->ring;
bool doorbell_pending;
+ bool xdp_redir_flush;
struct mlx4_cqe *cqe;
struct xdp_buff xdp;
int polled = 0;
@@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
+ xdp_redir_flush = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
@@ -790,6 +792,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
switch (act) {
case XDP_PASS:
break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ ring->xdp_redirect++;
+ xdp_redir_flush = true;
+ frags[0].page = NULL;
+ goto next;
+ }
+ ring->xdp_redirect_fail++;
+ trace_xdp_exception(dev, xdp_prog, act);
+ goto xdp_drop_no_cnt;
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
length, cq_ring,
@@ -897,6 +909,9 @@ next:
break;
}
+ if (xdp_redir_flush)
+ xdp_do_flush();
+
if (likely(polled)) {
if (doorbell_pending) {
priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
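
Note how the mlx4 Rx path batches redirects: xdp_do_redirect() only queues each frame, and a single xdp_do_flush() after the poll loop kicks the queued maps and devices. The shape, sketched with the surrounding bookkeeping elided:

	bool redir_flush = false;

	while (budget--) {
		/* ... run the XDP program, obtain act ... */
		if (act == XDP_REDIRECT &&
		    !xdp_do_redirect(dev, &xdp, prog))
			redir_flush = true;	/* queued; flush later */
	}

	if (redir_flush)
		xdp_do_flush();			/* one flush per NAPI poll */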
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c56b9dba4c71..817f4154b86d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -130,6 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = !!(priv->pflags &
MLX4_EN_PRIV_FLAGS_BLUEFLAME);
}
+ ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
@@ -753,8 +754,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- (__force u32)ring->doorbell_qpn,
- ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ (__force u32)ring->doorbell_qpn, ring->doorbell_address);
}
static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index dc4ac1a2b6b6..42c96c9d7fb1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -3105,7 +3105,7 @@ void mlx4_replace_zero_macs(struct mlx4_dev *dev)
dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
eth_random_addr(mac_addr);
dev->port_random_macs |= 1 << i;
- dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ dev->caps.def_mac[i] = ether_addr_to_u64(mac_addr);
}
}
EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5a6b0fcaf7f8..b187c210d4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4015,9 +4015,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- ret = devlink_register(devlink);
- if (ret)
- goto err_persist_free;
ret = devlink_params_register(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
if (ret)
@@ -4027,17 +4024,15 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_params_unregister;
- devlink_params_publish(devlink);
- devlink_reload_enable(devlink);
pci_save_state(pdev);
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
err_params_unregister:
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
- devlink_unregister(devlink);
-err_persist_free:
kfree(dev->persist);
err_devlink_free:
devlink_free(devlink);
@@ -4140,7 +4135,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4176,7 +4171,6 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_pci_disable_device(dev);
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
- devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f1b4ad9c66d2..f1716a83a4d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1046,7 +1046,7 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6bf558c5ec10..e132ff4c82f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -283,6 +283,7 @@ struct mlx4_en_tx_ring {
struct mlx4_bf bf;
/* Following part should be mostly read */
+ void __iomem *doorbell_address;
__be32 doorbell_qpn;
__be32 mr_key;
u32 size; /* number of TXBBs */
@@ -340,6 +341,8 @@ struct mlx4_en_rx_ring {
unsigned long csum_complete;
unsigned long rx_alloc_pages;
unsigned long xdp_drop;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_fail;
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 7b51ae8cf759..e9cd4bb6f83d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -42,9 +42,11 @@ struct mlx4_en_port_stats {
struct mlx4_en_xdp_stats {
unsigned long rx_xdp_drop;
+ unsigned long rx_xdp_redirect;
+ unsigned long rx_xdp_redirect_fail;
unsigned long rx_xdp_tx;
unsigned long rx_xdp_tx_full;
-#define NUM_XDP_STATS 3
+#define NUM_XDP_STATS 5
};
struct mlx4_en_phy_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 63032cd6efb1..bdb271b604d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,10 +14,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o
+ fw_reset.o qos.o lib/tout.o
#
# Netdev basic
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
en/mapping.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index db5dfff585c9..f71ec4d9d68e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -45,6 +45,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
CMD_IF_REV = 5,
@@ -225,9 +226,13 @@ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
- unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
+ u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
+ unsigned long poll_end;
u8 own;
+ poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);
+
do {
own = READ_ONCE(ent->lay->status_own);
if (!(own & CMD_OWNER_HW)) {
@@ -925,15 +930,18 @@ static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
- struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
- unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
+ struct mlx5_core_dev *dev;
+ unsigned long cb_timeout;
struct semaphore *sem;
unsigned long flags;
- bool poll_cmd = ent->polling;
int alloc_ret;
int cmd_mode;
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+
complete(&ent->handling);
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -1073,7 +1081,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
struct mlx5_cmd *cmd = &dev->cmd;
int err;
@@ -2058,7 +2066,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
+ cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
if (!cmd->stats)
return -ENOMEM;
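
Two small hardening patterns appear in cmd.c: command timeouts now come from a per-device timeout table (mlx5_tout_ms(dev, CMD)) instead of a compile-time constant, and the stats array moves to kvcalloc(), which checks the count * size multiplication for overflow. A sketch of the allocation half:

static struct mlx5_cmd_stats *alloc_cmd_stats(void)
{
	/* kvcalloc() rejects MLX5_CMD_OP_MAX * sizeof() overflow and
	 * zeroes the array, unlike open-coded kvzalloc(n * size).
	 */
	return kvcalloc(MLX5_CMD_OP_MAX, sizeof(struct mlx5_cmd_stats),
			GFP_KERNEL);
}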
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index e8093c4e09d4..a8b84d53dfb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -33,6 +33,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
/* intf dev list mutex */
@@ -537,6 +538,16 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev);
}
+static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
+{
+ u64 fsystem_guid, psystem_guid;
+
+ fsystem_guid = mlx5_query_nic_system_image_guid(dev);
+ psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);
+
+ return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
+}
+
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
@@ -556,7 +567,8 @@ static int next_phys_dev(struct device *dev, const void *data)
if (mdev == curr)
return 0;
- if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
+ if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
+ mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
return 0;
return 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index dcf9f27ba2ef..1c98652b244a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -136,6 +136,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
@@ -153,6 +154,10 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (pci_num_vf(pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
+ }
+
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
mlx5_unload_one(dev);
@@ -449,7 +454,8 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct mlx5_core_dev *dev = devlink_priv(devlink);
bool new_state = val.vbool;
- if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+ if (new_state && !MLX5_CAP_GEN(dev, roce) &&
+ !MLX5_CAP_GEN(dev, roce_rw_supported)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
@@ -625,7 +631,6 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
value);
- devlink_param_publish(devlink, &enable_eth_param);
return 0;
}
@@ -636,7 +641,6 @@ static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
if (!mlx5_eth_supported(dev))
return;
- devlink_param_unpublish(devlink, &enable_eth_param);
devlink_param_unregister(devlink, &enable_eth_param);
}
@@ -672,7 +676,6 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
value);
- devlink_param_publish(devlink, &enable_rdma_param);
return 0;
}
@@ -681,7 +684,6 @@ static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
- devlink_param_unpublish(devlink, &enable_rdma_param);
devlink_param_unregister(devlink, &enable_rdma_param);
}
@@ -706,7 +708,6 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
value);
- devlink_param_publish(devlink, &enable_rdma_param);
return 0;
}
@@ -717,7 +718,6 @@ static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
if (!mlx5_vnet_supported(dev))
return;
- devlink_param_unpublish(devlink, &enable_vnet_param);
devlink_param_unregister(devlink, &enable_vnet_param);
}
@@ -797,18 +797,15 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink)
int mlx5_devlink_register(struct devlink *devlink)
{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- err = devlink_register(devlink);
- if (err)
- return err;
-
err = devlink_params_register(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
if (err)
- goto params_reg_err;
+ return err;
+
mlx5_devlink_set_params_init_values(devlink);
- devlink_params_publish(devlink);
err = mlx5_devlink_auxdev_params_register(devlink);
if (err)
@@ -818,6 +815,9 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto traps_reg_err;
+ if (!mlx5_core_is_mp_slave(dev))
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+
return 0;
traps_reg_err:
@@ -825,8 +825,6 @@ traps_reg_err:
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
-params_reg_err:
- devlink_unregister(devlink);
return err;
}
@@ -834,8 +832,6 @@ void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
- devlink_params_unpublish(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
- devlink_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 87d65f6b5310..7841ef6c193c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -235,6 +235,9 @@ const char *parse_fs_dst(struct trace_seq *p,
const char *ret = trace_seq_buffer_ptr(p);
switch (dst->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ trace_seq_printf(p, "uplink\n");
+ break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
trace_seq_printf(p, "vport=%u\n", dst->vport.num);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f9cf9fb31547..da1bec04efff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1069,7 +1069,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
- flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 03a7a4ce5cd5..a3a4fece0cac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -220,8 +220,6 @@ struct mlx5e_umr_wqe {
struct mlx5_mtt inline_mtts[0];
};
-extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
MLX5E_PFLAG_TX_CQE_BASED_MODER,
@@ -253,6 +251,9 @@ struct mlx5e_params {
u16 mode;
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ struct {
+ struct mlx5e_mqprio_rl *rl;
+ } channel;
} mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
@@ -879,6 +880,7 @@ struct mlx5e_priv {
#endif
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb htb;
+ struct mlx5e_mqprio_rl *mqprio_rl;
};
struct mlx5e_rx_handlers {
@@ -918,6 +920,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
@@ -1003,7 +1006,8 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 86e079310ac3..ae52e7f38306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -24,7 +24,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn);
+ attrs.phys.port_number = mlx5_get_dev_index(priv->mdev);
if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index a88a1a48229f..678ffbb48a25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -125,15 +125,15 @@ struct mlx5e_ethtool_steering {
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 018262d0164b..d5b7110a4265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -32,7 +32,6 @@ void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
-#define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000
struct mlx5e_err_ctx {
int (*recover)(void *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e8a8d78e3e4d..50977f01a050 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -7,6 +7,21 @@
#define BYTES_IN_MBIT 125000
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ if (nbytes < BYTES_IN_MBIT) {
+ qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
+ nbytes, BYTES_IN_MBIT);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ return div_u64(nbytes, BYTES_IN_MBIT);
+}
+
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
@@ -238,7 +253,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id, node->qid);
+ &param_sq, sq, 0, node->hw_id,
+ priv->htb.qos_sq_stats[node->qid]);
if (err)
goto err_close_cq;
@@ -979,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce
return err;
}
+
+struct mlx5e_mqprio_rl {
+ struct mlx5_core_dev *mdev;
+ u32 root_id;
+ u32 *leaves_id;
+ u8 num_tc;
+};
+
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
+}
+
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
+{
+ kvfree(rl);
+}
+
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[])
+{
+ int err;
+ int tc;
+
+ if (!mlx5_qos_is_supported(mdev)) {
+ qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
+ }
+ if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
+ return -EINVAL;
+
+ rl->mdev = mdev;
+ rl->num_tc = num_tc;
+ rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
+ if (!rl->leaves_id)
+ return -ENOMEM;
+
+ err = mlx5_qos_create_root_node(mdev, &rl->root_id);
+ if (err)
+ goto err_free_leaves;
+
+ qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);
+
+ for (tc = 0; tc < num_tc; tc++) {
+ u32 max_average_bw;
+
+ max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
+ err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
+ &rl->leaves_id[tc]);
+ if (err)
+ goto err_destroy_leaves;
+
+ qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
+ tc, rl->leaves_id[tc], max_average_bw);
+ }
+ return 0;
+
+err_destroy_leaves:
+ while (--tc >= 0)
+ mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(mdev, rl->root_id);
+err_free_leaves:
+ kvfree(rl->leaves_id);
+ return err;
+}
+
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < rl->num_tc; tc++)
+ mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(rl->mdev, rl->root_id);
+ kvfree(rl->leaves_id);
+}
+
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
+{
+ if (tc >= rl->num_tc)
+ return -EINVAL;
+
+ *hw_id = rl->leaves_id[tc];
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 757682b7c0e0..b7558907ba20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -12,6 +12,7 @@ struct mlx5e_priv;
struct mlx5e_channels;
struct mlx5e_channel;
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
@@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack);
+/* MQPRIO TX rate limit */
+struct mlx5e_mqprio_rl;
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void);
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[]);
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id);
#endif
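
Intended use of the new mqprio rate-limit API, sketched with made-up rates (the 125000 factor is BYTES_IN_MBIT from qos.c, converting Mbit/sec to bytes/sec); error handling abbreviated:

	u64 max_rate[2] = { 100 * 125000ULL, 200 * 125000ULL }; /* bytes/sec */
	struct mlx5e_mqprio_rl *rl;
	u32 hw_id;
	int err;

	rl = mlx5e_mqprio_rl_alloc();
	if (!rl)
		return -ENOMEM;

	err = mlx5e_mqprio_rl_init(rl, mdev, 2, max_rate);
	if (err) {
		mlx5e_mqprio_rl_free(rl);
		return err;
	}

	mlx5e_mqprio_rl_get_node_hw_id(rl, 0, &hw_id); /* attach tc 0 SQs */
	/* ... on teardown ... */
	mlx5e_mqprio_rl_cleanup(rl);
	mlx5e_mqprio_rl_free(rl);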
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index de03684528bb..398c6761eeb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -3,6 +3,7 @@
#include <net/dst_metadata.h>
#include <linux/netdevice.h>
+#include <linux/if_macvlan.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
@@ -409,6 +410,13 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv)
static LIST_HEAD(mlx5e_block_cb_list);
+static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode == MACVLAN_MODE_PASSTHRU;
+}
+
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
struct mlx5e_rep_priv *rpriv,
@@ -422,8 +430,14 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
struct flow_block_cb *block_cb;
if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
- !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
- return -EOPNOTSUPP;
+ !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)) {
+ if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
+ return -EOPNOTSUPP;
+ if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
+ netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
+ return -EOPNOTSUPP;
+ }
+ }
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
@@ -647,9 +661,7 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk
"Failed to restore tunnel info for sampled packet\n");
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_skb(skb, mapped_obj);
-#endif /* CONFIG_MLX5_TC_SAMPLE */
mlx5_rep_tc_post_napi_receive(tc_priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 0eb125316fe2..74086eb556ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -6,6 +6,7 @@
#include "txrx.h"
#include "devlink.h"
#include "ptp.h"
+#include "lib/tout.h"
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
@@ -32,8 +33,10 @@ out:
static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = icosq->channel->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (icosq->cc == icosq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index bb682fd751c9..4f4bc8726ec4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -4,11 +4,14 @@
#include "health.h"
#include "en/ptp.h"
#include "en/devlink.h"
+#include "lib/tout.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = sq->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (sq->cc == sq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 625cd49ef96c..b8b481b335cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -391,7 +391,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
{
int err;
@@ -399,6 +399,7 @@ static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_r
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
+ return err;
}
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
@@ -490,6 +491,14 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
{
bool changed_indir = false;
bool changed_hash = false;
+ struct mlx5e_rss *old_rss;
+ int err = 0;
+
+ old_rss = mlx5e_rss_alloc();
+ if (!old_rss)
+ return -ENOMEM;
+
+ *old_rss = *rss;
if (hfunc && *hfunc != rss->hash.hfunc) {
switch (*hfunc) {
@@ -497,7 +506,8 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
case ETH_RSS_HASH_TOP:
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
changed_hash = true;
changed_indir = true;
@@ -520,13 +530,20 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.table[i] = indir[i];
}
- if (changed_indir && rss->enabled)
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (changed_indir && rss->enabled) {
+ err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (err) {
+ *rss = *old_rss;
+ goto out;
+ }
+ }
if (changed_hash)
mlx5e_rss_update_tirs(rss);
- return 0;
+out:
+ mlx5e_rss_free(old_rss);
+ return err;
}
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
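
mlx5e_rss_set_rxfh() now snapshots the RSS state and restores it when applying the new indirection table fails, so a rejected ethtool -X leaves the old configuration intact. The general snapshot-and-rollback shape, sketched with placeholder types and helpers:

static int set_config(struct obj *o, const struct cfg *new)
{
	struct obj *old;
	int err;

	old = kmemdup(o, sizeof(*o), GFP_KERNEL);	/* snapshot */
	if (!old)
		return -ENOMEM;

	apply_cfg(o, new);				/* placeholder */
	err = commit_to_hw(o);				/* placeholder */
	if (err)
		*o = *old;				/* roll back */

	kfree(old);
	return err;
}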
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index 6552ecee3f9b..d1d7e4b9f7ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -602,7 +602,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
}
sample_flow->pre_attr = pre_attr;
- return sample_flow->post_rule;
+ return sample_flow->pre_rule;
err_pre_offload_rule:
kfree(pre_attr);
@@ -613,7 +613,7 @@ err_sample_restore:
err_obj_id:
sampler_put(tc_psample, sample_flow->sampler);
err_sampler:
- if (!post_act_handle)
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
if (post_act_handle)
@@ -628,9 +628,7 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5e_sample_flow *sample_flow;
- struct mlx5_vport_tbl_attr tbl_attr;
struct mlx5_eswitch *esw;
if (IS_ERR_OR_NULL(tc_psample))
@@ -650,23 +648,14 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
*/
sample_flow = attr->sample_attr->sample_flow;
mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
- if (!sample_flow->post_act_handle)
- mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule,
- sample_flow->post_attr);
sample_restore_put(tc_psample, sample_flow->restore);
mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle) {
+ if (sample_flow->post_act_handle)
mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- } else {
- tbl_attr.chain = attr->chain;
- tbl_attr.prio = attr->prio;
- tbl_attr.vport = esw_attr->in_rep->vport;
- tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
- mlx5_esw_vporttbl_put(esw, &tbl_attr);
- kfree(sample_flow->post_attr);
- }
+ else
+ del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
kfree(sample_flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
index db0146df9b30..9ef8a49d7801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -19,6 +19,8 @@ struct mlx5e_sample_attr {
struct mlx5e_sample_flow *sample_flow;
};
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+
void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
struct mlx5_flow_handle *
@@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act);
void
mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
+#else /* CONFIG_MLX5_TC_SAMPLE */
+
+static inline struct mlx5_flow_handle *
+mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ u32 tunnel_id)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr) {}
+
+static inline struct mlx5e_tc_psample *
+mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {}
+
+static inline void
+mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {}
+
+#endif /* CONFIG_MLX5_TC_SAMPLE */
#endif /* __MLX5_EN_TC_SAMPLE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 6c949abcd2e1..740cd6f088b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -2127,12 +2127,21 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
- rhashtable_init(&ct_priv->zone_ht, &zone_params);
- rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
- rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
+ if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
+ goto err_ct_zone_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params))
+ goto err_ct_tuples_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
+ goto err_ct_tuples_nat_ht;
return ct_priv;
+err_ct_tuples_nat_ht:
+ rhashtable_destroy(&ct_priv->ct_tuples_ht);
+err_ct_tuples_ht:
+ rhashtable_destroy(&ct_priv->zone_ht);
+err_ct_zone_ht:
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 4a13ef561587..b7461c17d601 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -120,6 +120,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
+ } else {
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
}
rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
@@ -437,12 +442,15 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_tc_tun_route_attr *attr)
{
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
struct net_device *route_dev;
struct net_device *out_dev;
struct dst_entry *dst;
struct neighbour *n;
int ret;
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 9350ca05ce65..aa092eaeaec3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -51,6 +51,7 @@ struct mlx5e_tc_tunnel {
void *headers_v);
bool (*encap_info_equal)(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b);
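+ /* Optional. Returns the tunnel's lower device ifindex for route lookups. */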
+ int (*get_remote_ifindex)(struct net_device *mirred_dev);
};
extern struct mlx5e_tc_tunnel vxlan_tunnel;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 4267f3a1059e..fd07c4cbfd1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -141,6 +141,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return 0;
}
+static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
+{
+ const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
+ const struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ return dst->remote_ifindex;
+}
+
struct mlx5e_tc_tunnel vxlan_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
.match_level = MLX5_MATCH_L4,
@@ -151,4 +159,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
+ .get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 9d451b8ee467..25926e581d18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -267,9 +267,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_TEST:
- for (i = 0; i < mlx5e_self_test_num(priv); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx5e_self_tests[i]);
+ mlx5e_self_test_fill_strings(priv, data);
break;
case ETH_SS_STATS:
@@ -2139,12 +2137,14 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return 0;
}
- return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- return mlx5e_ethtool_set_rxnfc(dev, cmd);
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d226cc5ab1d1..aeff1d972a46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -71,12 +71,12 @@ struct mlx5e_l2_hash_node {
bool mpfs;
};
-static inline int mlx5e_hash_l2(u8 *addr)
+static inline int mlx5e_hash_l2(const u8 *addr)
{
return addr[5];
}
-static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
struct mlx5e_l2_hash_node *hn;
int ix = mlx5e_hash_l2(addr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 03693fa74a70..81ebf281cdb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -937,9 +937,8 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (cmd->cmd) {
@@ -960,10 +959,9 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (info->cmd) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 41ef6eb70a58..0ff36c83714b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -930,9 +930,10 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+ xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
@@ -946,10 +947,11 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
@@ -1298,7 +1300,8 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1308,10 +1311,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (err)
return err;
- if (qos_queue_group_id)
- sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
- else
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stats = sq_stats;
csp.tisn = tisn;
csp.tis_lst_sz = 1;
@@ -1705,6 +1705,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
mlx5e_close_cq(&c->sq[tc].cq);
}
+static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+{
+ int tc;
+
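+ /* A txq maps to the TC whose [offset, offset + count) range contains it. */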
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+ return tc;
+
+ WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+ return -ENOENT;
+}
+
+static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+ u32 *hw_id)
+{
+ int tc;
+
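+ /* Without per-TC max rates there is no QoS node; hw_id 0 selects the default group. */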
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
+ !params->mqprio.channel.rl) {
+ *hw_id = 0;
+ return 0;
+ }
+
+ tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+ if (tc < 0)
+ return tc;
+
+ return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+}
+
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1713,9 +1743,16 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
+ u32 qos_queue_group_id;
+
+ err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+ if (err)
+ goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
+ params, &cparam->txq_sq, &c->sq[tc], tc,
+ qos_queue_group_id,
+ &c->priv->channel_stats[c->ix].sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -2340,6 +2377,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
+ if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+ }
return 0;
@@ -2901,15 +2945,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
+ params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt *qopt,
+ struct mlx5e_mqprio_rl *rl)
{
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
params->mqprio.num_tc = qopt->num_tc;
+ params->mqprio.channel.rl = rl;
mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
}
@@ -2969,9 +3016,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL;
}
+
if (mqprio->max_rate[i]) {
- netdev_err(netdev, "Max tx rate is not supported\n");
- return -EINVAL;
+ int err;
+
+ err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+ if (err)
+ return err;
}
if (mqprio->qopt.offset[i] != agg_count) {
@@ -2990,11 +3041,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
+static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+{
+ int tc;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
+ if (mqprio->max_rate[tc])
+ return true;
+ return false;
+}
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
+ struct mlx5e_mqprio_rl *rl;
bool nch_changed;
int err;
@@ -3002,13 +3064,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err)
return err;
+ rl = NULL;
+ if (mlx5e_mqprio_rate_limit(mqprio)) {
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return -ENOMEM;
+ err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+ mqprio->max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return err;
+ }
+ }
+
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
+ mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
- return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
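+ /* The rl object is owned by the channel params only after a successful switch; free it on failure. */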
+ if (err && rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+
+ return err;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3226,7 +3307,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ eth_hw_addr_set(netdev, saddr->sa_data);
netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv);
@@ -4862,6 +4943,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb.qos_sq_stats[i]);
kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index ce8ab1f01876..8c9163d2c646 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -35,30 +35,7 @@
#include <net/udp.h>
#include "en.h"
#include "en/port.h"
-
-enum {
- MLX5E_ST_LINK_STATE,
- MLX5E_ST_LINK_SPEED,
- MLX5E_ST_HEALTH_INFO,
-#ifdef CONFIG_INET
- MLX5E_ST_LOOPBACK,
-#endif
- MLX5E_ST_NUM,
-};
-
-const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = {
- "Link Test",
- "Speed Test",
- "Health Test",
-#ifdef CONFIG_INET
- "Loopback Test",
-#endif
-};
-
-int mlx5e_self_test_num(struct mlx5e_priv *priv)
-{
- return ARRAY_SIZE(mlx5e_self_tests);
-}
+#include "eswitch.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
@@ -265,6 +242,14 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5e_refresh_tirs(priv, false, false);
}
+static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
+{
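+ /* The loopback self-test is not supported in switchdev mode. */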
+ if (is_mdev_switchdev_mode(priv->mdev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
static int mlx5e_test_loopback(struct mlx5e_priv *priv)
{
@@ -313,37 +298,47 @@ out:
}
#endif
-static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = {
- mlx5e_test_link_state,
- mlx5e_test_link_speed,
- mlx5e_test_health_info,
+typedef int (*mlx5e_st_func)(struct mlx5e_priv *);
+
+struct mlx5e_st {
+ char name[ETH_GSTRING_LEN];
+ mlx5e_st_func st_func;
+ mlx5e_st_func cond_func;
+};
+
+static struct mlx5e_st mlx5e_sts[] = {
+ { "Link Test", mlx5e_test_link_state },
+ { "Speed Test", mlx5e_test_link_speed },
+ { "Health Test", mlx5e_test_health_info },
#ifdef CONFIG_INET
- mlx5e_test_loopback,
+ { "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback },
#endif
};
+#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts)
+
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf)
{
struct mlx5e_priv *priv = netdev_priv(ndev);
- int i;
-
- memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM);
+ int i, count = 0;
mutex_lock(&priv->state_lock);
netdev_info(ndev, "Self test begin..\n");
for (i = 0; i < MLX5E_ST_NUM; i++) {
- netdev_info(ndev, "\t[%d] %s start..\n",
- i, mlx5e_self_tests[i]);
- buf[i] = mlx5e_st_func[i](priv);
- netdev_info(ndev, "\t[%d] %s end: result(%lld)\n",
- i, mlx5e_self_tests[i], buf[i]);
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
+ buf[count] = st.st_func(priv);
+ netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+ count++;
}
mutex_unlock(&priv->state_lock);
- for (i = 0; i < MLX5E_ST_NUM; i++) {
+ for (i = 0; i < count; i++) {
if (buf[i]) {
etest->flags |= ETH_TEST_FL_FAILED;
break;
@@ -352,3 +347,24 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "Self test out: status flags(0x%x)\n",
etest->flags);
}
+
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+ int i, count = 0;
+
+ for (i = 0; i < MLX5E_ST_NUM; i++) {
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ if (data)
+ strcpy(data + count * ETH_GSTRING_LEN, st.name);
+ count++;
+ }
+ return count;
+}
+
+int mlx5e_self_test_num(struct mlx5e_priv *priv)
+{
+ return mlx5e_self_test_fill_strings(priv, NULL);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 129ff7e0d65c..57369925a788 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
+#include <linux/if_macvlan.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/psample.h>
@@ -248,7 +249,6 @@ get_ct_priv(struct mlx5e_priv *priv)
return priv->fs.tc.ct;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
@@ -265,7 +265,6 @@ get_sample_priv(struct mlx5e_priv *priv)
return NULL;
}
-#endif
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
@@ -1148,11 +1147,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
flow, spec, attr,
mod_hdr_acts);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
} else if (flow_flag_test(flow, SAMPLE)) {
rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
mlx5e_tc_get_flow_tun_id(flow));
-#endif
} else {
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -1188,12 +1185,10 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
if (flow_flag_test(flow, SAMPLE)) {
mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
return;
}
-#endif
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1690,8 +1685,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
if (opt->opt_class != htons(U16_MAX) ||
opt->type != U8_MAX) {
- NL_SET_ERR_MSG(extack,
- "Partial match of tunnel options in chain > 0 isn't supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
"Partial match of tunnel options in chain > 0 isn't supported");
return -EOPNOTSUPP;
@@ -1898,8 +1893,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
bool needs_mapping, sets_mapping;
int err;
- if (!mlx5e_is_eswitch_flow(flow))
+ if (!mlx5e_is_eswitch_flow(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
return -EOPNOTSUPP;
+ }
needs_mapping = !!flow->attr->chain;
sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
@@ -1907,8 +1904,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if ((needs_mapping || sets_mapping) &&
!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
- NL_SET_ERR_MSG(extack,
- "Chains on tunnel devices isn't supported without register loopback support");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Chains on tunnel devices isn't supported without register loopback support");
netdev_warn(priv->netdev,
"Chains on tunnel devices isn't supported without register loopback support");
return -EOPNOTSUPP;
@@ -2271,8 +2268,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
addr_type = match.key->addr_type;
/* the HW doesn't support frag first/later */
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
return -EOPNOTSUPP;
+ }
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
@@ -2439,8 +2438,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (ip_proto) {
case IPPROTO_ICMP:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMP))
+ MLX5_FLEX_PROTO_ICMP)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMP is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
@@ -2452,8 +2454,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
break;
case IPPROTO_ICMPV6:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMPV6))
+ MLX5_FLEX_PROTO_ICMPV6)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMPV6 is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
@@ -2559,15 +2564,19 @@ static int pedit_header_offsets[] = {
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers_action *hdrs)
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
- if (*curr_pmask & mask) /* disallow acting twice on the same location */
+ if (*curr_pmask & mask) { /* disallow acting twice on the same location */
+ NL_SET_ERR_MSG_MOD(extack,
+ "curr_pmask and new mask same. Acting twice on same location");
goto out_err;
+ }
*curr_pmask |= mask;
*curr_pval |= (val & mask);
@@ -2900,7 +2909,7 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
val = act->mangle.val;
offset = act->mangle.offset;
- err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
if (err)
goto out_err;
@@ -2912,16 +2921,17 @@ out_err:
}
static int
-parse_pedit_to_reformat(struct mlx5e_priv *priv,
- const struct flow_action_entry *act,
+parse_pedit_to_reformat(const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
u32 mask, val, offset;
u32 *p;
- if (act->id != FLOW_ACTION_MANGLE)
+ if (act->id != FLOW_ACTION_MANGLE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
return -EOPNOTSUPP;
+ }
if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
@@ -2945,7 +2955,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(priv, act, parse_attr, extack);
+ return parse_pedit_to_reformat(act, parse_attr, extack);
return parse_pedit_to_modify_hdr(priv, act, namespace,
parse_attr, hdrs, extack);
@@ -3027,10 +3037,10 @@ struct ipv6_hoplimit_word {
__u8 hop_limit;
};
-static int is_action_keys_supported(const struct flow_action_entry *act,
- bool ct_flow, bool *modify_ip_header,
- bool *modify_tuple,
- struct netlink_ext_ack *extack)
+static bool
+is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
+ bool *modify_ip_header, bool *modify_tuple,
+ struct netlink_ext_ack *extack)
{
u32 mask, offset;
u8 htype;
@@ -3058,7 +3068,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv4 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
@@ -3076,7 +3086,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv6 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
@@ -3084,11 +3094,11 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of transport header ports with action ct");
- return -EOPNOTSUPP;
+ return false;
}
}
- return 0;
+ return true;
}
static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
@@ -3135,7 +3145,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i, err;
+ int i;
headers_c = get_match_headers_criteria(actions, spec);
headers_v = get_match_headers_value(actions, spec);
@@ -3153,11 +3163,10 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
act->id != FLOW_ACTION_ADD)
continue;
- err = is_action_keys_supported(act, ct_flow,
- &modify_ip_header,
- &modify_tuple, extack);
- if (err)
- return err;
+ if (!is_action_keys_supported(act, ct_flow,
+ &modify_ip_header,
+ &modify_tuple, extack))
+ return false;
}
if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
@@ -3178,37 +3187,65 @@ out_ok:
return true;
}
-static bool actions_match_supported(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+static bool
+actions_match_supported_fdb(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
- bool ct_flow = false, ct_clear = false;
- u32 actions;
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ bool ct_flow, ct_clear;
- ct_clear = flow->attr->ct_attr.ct_action &
- TCA_CT_ACT_CLEAR;
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- actions = flow->attr->action;
- if (mlx5e_is_eswitch_flow(flow)) {
- if (flow->attr->esw_attr->split_count && ct_flow &&
- !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
- /* All registers used by ct are cleared when using
- * split rules.
- */
- NL_SET_ERR_MSG_MOD(extack,
- "Can't offload mirroring with action ct");
- return false;
- }
+ if (esw_attr->split_count && ct_flow &&
+ !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
+ /* All registers used by ct are cleared when using
+ * split rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
+ return false;
}
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(priv, &parse_attr->spec,
- flow_action, actions,
- ct_flow, ct_clear,
- extack);
+ if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
+ netdev_warn_once(priv->netdev,
+ "current firmware doesn't support split rule for port mirroring\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+actions_match_supported(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ u32 actions = flow->attr->action;
+ bool ct_flow, ct_clear;
+
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+
+ if (!(actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
+ actions, ct_flow, ct_clear, extack))
+ return false;
+
+ if (mlx5e_is_eswitch_flow(flow) &&
+ !actions_match_supported_fdb(priv, parse_attr, flow, extack))
+ return false;
return true;
}
@@ -3357,11 +3394,51 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_nic_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
+static int
+actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
+ !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
+ return 0;
+
+ ns_type = get_flow_name_space(flow);
+
+ err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
+ &attr->action, extack);
+ if (err)
+ return err;
+
+ /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
+ if (parse_attr->mod_hdr_acts.num_actions > 0)
+ return 0;
+
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+
+ if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
+ return 0;
+
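+ /* split_count may have been set by pedit or vlan pop/push; with all
+ * pedits skipped and no pop/push either, reset it too.
+ */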
+ if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+ attr->esw_attr->split_count = 0;
+
+ return 0;
+}
+
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
@@ -3370,12 +3447,16 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
u32 action = 0;
int err, i;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
nic_attr = attr->nic_attr;
nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
@@ -3453,7 +3534,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -3464,38 +3546,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, CT);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in NIC action");
return -EOPNOTSUPP;
}
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- }
- }
-
attr->action = action;
- if (attr->dest_chain) {
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -3519,19 +3585,25 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
u8 vlan_idx = attr->total_vlan;
- if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
return -EOPNOTSUPP;
+ }
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan pop action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
} else {
@@ -3547,20 +3619,27 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported for vlan depth > 1");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
(act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio))
+ act->vlan.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
}
@@ -3594,7 +3673,8 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
struct flow_action_entry vlan_act = {
@@ -3605,7 +3685,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
@@ -3616,14 +3696,15 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
return -ENODEV;
if (is_vlan_dev(*out_dev))
- err = add_vlan_push_action(priv, attr, out_dev, action);
+ err = add_vlan_push_action(priv, attr, out_dev, action, extack);
return err;
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
@@ -3633,7 +3714,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
}
@@ -3755,18 +3836,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
int err, i, if_count = 0;
bool mpls_push = false;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+ break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -3904,18 +3994,21 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
&out_dev,
- &action);
+ &action, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
err = add_vlan_pop_action(priv, attr,
- &action);
+ &action, extack);
if (err)
return err;
}
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
err = verify_uplink_forwarding(priv, flow, out_dev, extack);
if (err)
return err;
@@ -3957,10 +4050,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_TUNNEL_ENCAP:
info = act->tunnel;
- if (info)
+ if (info) {
encap = true;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Zero tunnel attributes is not supported");
return -EOPNOTSUPP;
+ }
break;
case FLOW_ACTION_VLAN_PUSH:
@@ -3974,7 +4070,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
act, parse_attr, hdrs,
&action, extack);
} else {
- err = parse_tc_vlan_action(priv, act, esw_attr, &action);
+ err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
}
if (err)
return err;
@@ -4000,7 +4096,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -4027,7 +4124,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, SAMPLE);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in FDB action");
return -EOPNOTSUPP;
}
}
@@ -4047,60 +4145,26 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag. we might have set split_count either by pedit or
- * pop/push. if there is no pop/push either, reset it too.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
- (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
- esw_attr->split_count = 0;
- }
- }
-
attr->action = action;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
- return -EOPNOTSUPP;
-
- if (attr->dest_chain) {
- if (decap) {
- /* It can be supported if we'll create a mapping for
- * the tunnel device only (without tunnel), and set
- * this tunnel id with this decap flow.
- *
- * On restore (miss), we'll just set this saved tunnel
- * device.
- */
- NL_SET_ERR_MSG(extack,
- "Decap with goto isn't supported");
- netdev_warn(priv->netdev,
- "Decap with goto isn't supported");
- return -EOPNOTSUPP;
- }
-
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- }
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
- if (!(attr->action &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack,
- "Rule must have at least one forward/drop action");
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
- }
- if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "current firmware doesn't support split rule for port mirroring");
- netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
+ if (attr->dest_chain && decap) {
+ /* It can be supported if we'll create a mapping for
+ * the tunnel device only (without tunnel), and set
+ * this tunnel id with this decap flow.
+ *
+ * On restore (miss), we'll just set this saved tunnel
+ * device.
+ */
+
+ NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
+ netdev_warn(priv->netdev, "Decap with goto isn't supported");
return -EOPNOTSUPP;
}
@@ -4735,8 +4799,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -5008,9 +5074,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
MLX5_FLOW_NAMESPACE_FDB,
uplink_priv->post_act);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
-#endif
mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
@@ -5024,9 +5088,11 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_mapping = mapping;
- /* 0xFFF is reserved for stack devices slow path table mark */
+ /* Two last values are reserved for stack devices slow path table mark
+ * and bridge ingress push mark.
+ */
mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
- sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
+ sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5054,9 +5120,7 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
@@ -5076,9 +5140,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
mlx5_tc_ct_clean(uplink_priv->ct_priv);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 605c8ecc3610..792e0d6aa861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_ASYNC_EQE,
};
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
ncomp_eqs = table->num_comp_eqs;
nent = MLX5_COMP_EQ_SIZE;
for (i = 0; i < ncomp_eqs; i++) {
- int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
struct mlx5_eq_param param = {};
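+ /* Completion vectors start at 0 now that control EQs use the dedicated MLX5_IRQ_EQ_CTRL index. */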
+ int vecidx = i;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
goto err_out;
}
- vecidx = MLX5_IRQ_VEC_COMP_BASE;
- for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
- vecidx++) {
+ for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
err = irq_cpu_rmap_add(eq_table->rmap,
pci_irq_vector(mdev->pdev, vecidx));
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 7e221038df8d..588622ba38c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -28,7 +28,10 @@
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
@@ -61,6 +64,9 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
struct mlx5_flow_group *egress_mac_fg;
+ struct mlx5_flow_group *egress_miss_fg;
+ struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
+ struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
};
@@ -86,6 +92,26 @@ mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
SWITCHDEV_FDB_DEL_TO_BRIDGE);
}
+static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
+{
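+ /* VLAN pop via REMOVE_HEADER needs support for removing a full VLAN
+ * header at the 802.1Q tag offset.
+ */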
+ return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
+ offsetof(struct vlan_ethhdr, h_vlan_proto);
+}
+
+static struct mlx5_pkt_reformat *
+mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
+{
+ struct mlx5_pkt_reformat_params reformat_params = {};
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(struct vlan_hdr);
+ return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
+}
+
static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
@@ -287,43 +313,74 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
return fg;
}
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge egress table miss flow group (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
+ struct mlx5_eswitch *esw = br_offloads->esw;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return -EOPNOTSUPP;
ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(ingress_ft))
return PTR_ERR(ingress_ft);
skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(skip_ft)) {
err = PTR_ERR(skip_ft);
goto err_skip_tbl;
}
- vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
+ vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
+ filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
if (IS_ERR(filter_fg)) {
err = PTR_ERR(filter_fg);
goto err_filter_fg;
}
- mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
+ mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
@@ -362,35 +419,82 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
br_offloads->ingress_ft = NULL;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
+ struct mlx5_flow_handle *miss_handle = NULL;
+ struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_flow_table *egress_ft;
int err;
egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(egress_ft))
return PTR_ERR(egress_ft);
- vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
+ vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
+ mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
}
+ if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
+ miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
+ if (IS_ERR(miss_fg)) {
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
+ PTR_ERR(miss_fg));
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
+ if (IS_ERR(miss_pkt_reformat)) {
+ esw_warn(esw->dev,
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
+ PTR_ERR(miss_pkt_reformat));
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
+ br_offloads->skip_ft,
+ miss_pkt_reformat);
+ if (IS_ERR(miss_handle)) {
+ esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
+ PTR_ERR(miss_handle));
+ miss_handle = NULL;
+ mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+ }
+skip_miss_flow:
+
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
bridge->egress_mac_fg = mac_fg;
+ bridge->egress_miss_fg = miss_fg;
+ bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
+ bridge->egress_miss_handle = miss_handle;
return 0;
err_mac_fg:
@@ -403,6 +507,13 @@ err_vlan_fg:
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
+ if (bridge->egress_miss_handle)
+ mlx5_del_flow_rules(bridge->egress_miss_handle);
+ if (bridge->egress_miss_pkt_reformat)
+ mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
+ bridge->egress_miss_pkt_reformat);
+ if (bridge->egress_miss_fg)
+ mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
@@ -443,8 +554,10 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
if (vlan && vlan->pkt_reformat_push) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
flow_act.pkt_reformat = vlan->pkt_reformat_push;
+ flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.cvlan_tag);
@@ -564,6 +677,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+ vport_num == MLX5_VPORT_UPLINK)
+ rule_spec->flow_context.flow_source =
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
@@ -599,6 +716,41 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
return handle;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
+ .flags = FLOW_ACT_NO_APPEND,
+ .pkt_reformat = pkt_reformat,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
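+ /* Match packets marked on ingress as having a bridge-pushed VLAN,
+ * pop that VLAN and forward to the skip table.
+ */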
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
+ ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);
+
+ handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
struct mlx5_esw_bridge_offloads *br_offloads)
{
@@ -798,24 +950,14 @@ mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5
static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
- struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
- if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
- offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
return -EOPNOTSUPP;
}
- reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
- reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
- reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
- reformat_params.size = sizeof(struct vlan_hdr);
- pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_FDB);
+ pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
PTR_ERR(pkt_reformat));
@@ -833,6 +975,33 @@ mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_
vlan->pkt_reformat_pop = NULL;
}
+static int
+mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_modify_hdr *pkt_mod_hdr;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(set_action_in, action, offset, 8);
+ MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
+ MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);
+
+ pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
+ if (IS_ERR(pkt_mod_hdr))
+ return PTR_ERR(pkt_mod_hdr);
+
+ vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
+ vlan->pkt_mod_hdr_push_mark = NULL;
+}
+
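
The SET action above overwrites bits [offset, offset+length) of metadata register C1 with `data`. A minimal standalone model of that bitfield write; the 24-bit width at offset 8 mirrors the shape of the call, not the exact ESW_TUN_OPTS_BITS/ESW_TUN_ID_BITS values:

#include <stdint.h>
#include <stdio.h>

/*
 * Semantics of a MLX5_ACTION_TYPE_SET modify-header action on a 32-bit
 * metadata register: replace `length` bits starting at `offset` with `data`.
 */
static uint32_t set_bits(uint32_t reg, unsigned int offset,
			 unsigned int length, uint32_t data)
{
	uint32_t mask = ((length < 32 ? (1u << length) : 0) - 1u) << offset;

	return (reg & ~mask) | ((data << offset) & mask);
}

int main(void)
{
	/* Write a 24-bit mark at bit offset 8, leaving bits 0-7 intact. */
	uint32_t reg_c_1 = set_bits(0x000000aa, 8, 24, 0x2);

	printf("reg_c_1 = 0x%08x\n", reg_c_1);	/* 0x000002aa */
	return 0;
}
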
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
struct mlx5_eswitch *esw)
@@ -852,6 +1021,10 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
if (err)
goto err_vlan_push;
+
+ err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
+ if (err)
+ goto err_vlan_push_mark;
}
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
@@ -870,6 +1043,9 @@ err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push:
@@ -886,6 +1062,7 @@ static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
struct mlx5_esw_bridge *bridge)
{
+ struct mlx5_eswitch *esw = bridge->br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
@@ -894,9 +1071,11 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
}
if (vlan->pkt_reformat_pop)
- mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
if (vlan->pkt_reformat_push)
- mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index 52964a82d6a6..878311fe950a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan {
struct list_head fdb_list;
struct mlx5_pkt_reformat *pkt_reformat_push;
struct mlx5_pkt_reformat *pkt_reformat_pop;
+ struct mlx5_modify_hdr *pkt_mod_hdr_push_mark;
};
struct mlx5_esw_bridge_port {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 20af557ae30c..7f9b96d9537e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -36,7 +36,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
return NULL;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -149,7 +149,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
if (IS_ERR(vport))
return PTR_ERR(vport);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2c7444101bb9..28467f11f04b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -433,7 +433,7 @@ enum mlx5_flow_match_level {
};
/* current maximum for flow based vport multicasting */
-#define MLX5_MAX_FLOW_FWD_VPORTS 2
+#define MLX5_MAX_FLOW_FWD_VPORTS 32
enum {
MLX5_ESW_DEST_ENCAP = BIT(0),
@@ -447,8 +447,16 @@ enum {
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
+ MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
};
+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5_esw_attr_flags_skip(u32 attr_flags)
+{
+ return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
+}
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0d461e38add3..0ef126fd6a8e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -440,7 +440,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ } else if (mlx5_esw_attr_flags_skip(attr->flags)) {
esw_setup_slow_path_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
@@ -467,7 +467,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
- } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ } else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
@@ -482,12 +482,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = !!(esw_attr->split_count);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int i = 0;
@@ -495,6 +495,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
@@ -574,6 +578,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_add_rule:
@@ -584,6 +589,7 @@ err_add_rule:
err_esw_get:
esw_cleanup_dests(esw, attr);
err_create_goto_table:
+ kfree(dest);
return rule;
}
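
Moving `dest` to the heap follows from the MLX5_MAX_FLOW_FWD_VPORTS bump from 2 to 32 in eswitch.h above: a 33-entry on-stack destination array would dominate the kernel stack frame. Back-of-the-envelope math, assuming an illustrative 40-byte destination (the real struct mlx5_flow_destination size is build-dependent):

#include <stdio.h>

#define MAX_FLOW_FWD_VPORTS_OLD	2
#define MAX_FLOW_FWD_VPORTS_NEW	32
#define ASSUMED_DEST_SIZE	40	/* illustrative, not the real size */

int main(void)
{
	printf("old stack cost: %d bytes\n",
	       (MAX_FLOW_FWD_VPORTS_OLD + 1) * ASSUMED_DEST_SIZE);  /* 120 */
	printf("new stack cost: %d bytes\n",
	       (MAX_FLOW_FWD_VPORTS_NEW + 1) * ASSUMED_DEST_SIZE);  /* 1320 */
	return 0;
}
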
@@ -592,16 +598,20 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i, err = 0;
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
@@ -654,6 +664,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_chain_src_rewrite:
esw_put_dest_tables_loop(esw, attr, 0, i);
@@ -661,6 +672,7 @@ err_chain_src_rewrite:
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
+ kfree(dest);
return rule;
}
@@ -678,7 +690,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ if (!mlx5_esw_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
@@ -1009,7 +1021,7 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs;
- flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
if (!flows)
return -ENOMEM;
@@ -1188,7 +1200,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
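
The kvzalloc(num * size) to kvcalloc(num, size) conversions here are about overflow safety: kvcalloc rejects products that wrap around instead of allocating a too-small buffer. A userspace sketch of the check it performs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Why kvcalloc(n, size) beats kvzalloc(n * size): the multiply is checked. */
static void *checked_calloc(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);
}

int main(void)
{
	void *ok = checked_calloc(16, sizeof(uint64_t));
	void *bad = checked_calloc(SIZE_MAX / 4, 16);

	printf("ok=%p bad=%p\n", ok, bad);	/* bad is NULL */
	free(ok);
	return 0;
}
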
@@ -2798,7 +2810,7 @@ u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
int id;
/* Only 4 bits of pf_num */
- pf_num = PCI_FUNC(esw->dev->pdev->devfn);
+ pf_num = mlx5_get_dev_index(esw->dev);
if (pf_num > max_pf_num)
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index b45954905845..879d78e46e47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -219,7 +219,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
- attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
+ mlx5_esw_attr_flags_skip(attr->flags) ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7db8df64a60e..750b21124a1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -185,6 +185,20 @@ static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}
+static int
+mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return 0;
+}
+
+static int
+mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return 0;
+}
+
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@@ -563,8 +577,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(dst->dest_attr.vport.flags &
@@ -572,6 +586,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
+ if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
+ /* destination_id is reserved */
+ id = 0;
+ break;
+ }
+ id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
@@ -909,6 +929,45 @@ static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
+static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
+
+ return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+ void *ptr;
+ int err;
+
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
+ MLX5_SET(match_definer, ptr, format_id, format_id);
+
+ ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
+ memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
+
+ err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
+ return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+}
+
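
Note the return-value convention of the create callback: a negative errno on failure, otherwise the non-negative firmware object id, folded into one int. A toy model of the convention and the caller-side split (the constants are stand-ins):

#include <errno.h>
#include <stdio.h>

static int create_definer(int fail)
{
	if (fail)
		return -EOPNOTSUPP;
	return 42;			/* stand-in for the allocated obj_id */
}

int main(void)
{
	int id = create_definer(0);

	if (id < 0)			/* same split mlx5_create_match_definer does */
		printf("error: %d\n", id);
	else
		printf("definer id: %d\n", id);
	return 0;
}
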
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@@ -923,6 +982,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -942,6 +1003,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_stub_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -969,6 +1032,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
+ case FS_FT_PORT_SEL:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 5ecd33cdc087..220ec632d35a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -97,6 +97,10 @@ struct mlx5_flow_cmds {
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*create_match_definer)(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask);
+ int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
+ int definer_id);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fe501ba88bea..873efde0d458 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2191,6 +2191,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ if (steering->port_sel_root_ns)
+ return &steering->port_sel_root_ns->ns;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2596,6 +2600,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
@@ -2634,6 +2639,21 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return PTR_ERR_OR_ZERO(prio);
}
+#define PORT_SEL_NUM_LEVELS 3
+static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
+ if (!steering->port_sel_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
+ PORT_SEL_NUM_LEVELS);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@ -3020,6 +3040,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
+ err = init_port_sel_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
@@ -3224,6 +3250,52 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
+{
+ return definer->id;
+}
+
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_definer *definer;
+ int id;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return ERR_PTR(-ENOMEM);
+
+ definer->ns_type = ns_type;
+ id = root->cmds->create_match_definer(root, format_id, match_mask);
+ if (id < 0) {
+ mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
+ kfree(definer);
+ return ERR_PTR(id);
+ }
+ definer->id = id;
+ return definer;
+}
+
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, definer->ns_type);
+ if (WARN_ON(!root))
+ return;
+
+ root->cmds->destroy_match_definer(root, definer->id);
+ kfree(definer);
+}
+
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 98240badc342..7711db245c63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -49,6 +49,11 @@
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
+struct mlx5_flow_definer {
+ enum mlx5_flow_namespace_type ns_type;
+ u32 id;
+};
+
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
union {
@@ -97,7 +102,8 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
- FS_FT_MAX_TYPE = FS_FT_RDMA_TX,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
};
enum fs_flow_table_op_mod {
@@ -129,6 +135,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
+ struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
};
@@ -341,7 +348,8 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 18e5aec14641..f542a36be62c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -497,8 +497,7 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
- GFP_KERNEL);
+ bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
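
struct_size(bulk, fcs, bulk_len) replaces the open-coded `sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc)` with an overflow-checked form. A standalone sketch of what it computes, using a stand-in layout rather than the real mlx5_fc_bulk:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct bulk {
	uint32_t base_id;
	uint32_t len;
	uint64_t fcs[];			/* flexible array member */
};

static size_t struct_size_sketch(size_t n)
{
	if (n > (SIZE_MAX - sizeof(struct bulk)) / sizeof(uint64_t))
		return SIZE_MAX;	/* saturate on overflow, like the kernel */
	return sizeof(struct bulk) + n * sizeof(uint64_t);
}

int main(void)
{
	size_t sz = struct_size_sketch(128);
	struct bulk *b = calloc(1, sz);

	printf("allocating %zu bytes for 128 counters\n", sz);
	free(b);
	return 0;
}
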
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 016d26f809a5..1037e3629e7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
+#include "lib/tout.h"
#include "accel/tls.h"
enum {
@@ -148,6 +149,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
+ if (MLX5_CAP_GEN(dev, port_selection_cap)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)
@@ -317,10 +324,9 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
-#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN);
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
@@ -618,17 +624,18 @@ static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
fwhandle, 0);
}
-#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */
static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
- unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT);
struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
u32 out[MLX5_ST_SZ_DW(mirc_reg)];
u32 in[MLX5_ST_SZ_DW(mirc_reg)];
+ unsigned long exp_time;
int err;
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE));
+
if (!MLX5_CAP_MCAM_REG2(dev, mirc))
return -EOPNOTSUPP;
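
The pattern in this file (and the fw_reset.c and health.c changes below) is mechanical: each hard-coded timeout macro becomes a lookup in a per-device timeout table provided by lib/tout.h, so values can later come from the device instead of the driver. A sketch of the table shape under assumed names and defaults; this is not the real mlx5 tout API:

#include <stdint.h>
#include <stdio.h>

enum tout_type { TOUT_TEARDOWN, TOUT_FSM_REACTIVATE, TOUT_MAX };

struct tout_table {
	uint64_t ms[TOUT_MAX];
};

static void tout_set_defaults(struct tout_table *t)
{
	t->ms[TOUT_TEARDOWN] = 3000;		/* assumed default */
	t->ms[TOUT_FSM_REACTIVATE] = 5000;	/* assumed default */
}

static uint64_t tout_ms(const struct tout_table *t, enum tout_type type)
{
	return t->ms[type];
}

int main(void)
{
	struct tout_table t;

	tout_set_defaults(&t);
	printf("teardown timeout: %llu ms\n",
	       (unsigned long long)tout_ms(&t, TOUT_TEARDOWN));
	return 0;
}
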
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 106b50e42b46..0b0234f9d694 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -3,6 +3,7 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
+#include "lib/tout.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
@@ -228,8 +229,6 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-#define MLX5_PCI_LINK_UP_TIMEOUT 2000
-
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -286,7 +285,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
goto restore;
}
- timeout = jiffies + msecs_to_jiffies(MLX5_PCI_LINK_UP_TIMEOUT);
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_TOGGLE));
do {
err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16);
if (err)
@@ -299,8 +298,8 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
if (reg16 & PCI_EXP_LNKSTA_DLLLA) {
mlx5_core_info(dev, "PCI Link up\n");
} else {
- mlx5_core_err(dev, "PCI link not ready (0x%04x) after %d ms\n",
- reg16, MLX5_PCI_LINK_UP_TIMEOUT);
+ mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
+ reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
err = -ETIMEDOUT;
}
@@ -395,16 +394,16 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
return NOTIFY_OK;
}
-#define MLX5_FW_RESET_TIMEOUT_MSEC 5000
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_FW_RESET_TIMEOUT_MSEC);
+ unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
+ unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
int err;
if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
- mlx5_core_warn(dev, "FW sync reset timeout after %d seconds\n",
- MLX5_FW_RESET_TIMEOUT_MSEC / 1000);
+ mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
+ pci_sync_update_timeout / 1000);
err = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 037e18dd4be0..6a4dd7f78958 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -40,10 +40,10 @@
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
+#include "lib/tout.h"
#include "diag/fw_tracer.h"
enum {
- MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
MAX_MISSES = 3,
};
@@ -219,11 +219,9 @@ unlock:
mutex_unlock(&dev->intf_state_mutex);
}
-#define MLX5_CRDUMP_WAIT_MS 60000
-#define MLX5_FW_RESET_WAIT_MS 1000
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, PCI_TOGGLE);
int lock = -EBUSY;
mutex_lock(&dev->intf_state_mutex);
@@ -237,7 +235,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
lock = lock_sem_sw_reset(dev, true);
if (lock == -EBUSY) {
- delay_ms = MLX5_CRDUMP_WAIT_MS;
+ delay_ms = mlx5_tout_ms(dev, FULL_CRDUMP);
goto recover_from_sw_reset;
}
/* Execute SW reset */
@@ -307,13 +305,11 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
-/* How much time to wait until health resetting the driver (in msecs) */
-#define MLX5_RECOVERY_WAIT_MSECS 60000
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
unsigned long end;
- end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
+ end = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FW_RESET));
while (sensor_pci_not_working(dev)) {
if (time_after(jiffies, end))
return -ETIMEDOUT;
@@ -674,13 +670,13 @@ static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
devlink_health_reporter_destroy(health->fw_fatal_reporter);
}
-static unsigned long get_next_poll_jiffies(void)
+static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
{
unsigned long next;
get_random_bytes(&next, sizeof(next));
next %= HZ;
- next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+ next += jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL));
return next;
}
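
get_next_poll_jiffies() keeps adding up to one second (HZ) of random jitter on top of the now-configurable interval, so health timers of many devices don't fire in lockstep. A userspace version of the same computation:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static long next_poll_ms(long now_ms, long interval_ms)
{
	return now_ms + interval_ms + (rand() % 1000);	/* jitter < 1 s */
}

int main(void)
{
	srand((unsigned int)time(NULL));
	/* e.g. an interval of 2000 ms yields a deadline in [2000, 2999] ms */
	printf("next poll at +%ld ms\n", next_poll_ms(0, 2000));
	return 0;
}
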
@@ -740,11 +736,12 @@ static void poll_health(struct timer_list *t)
queue_work(health->wq, &health->report_work);
out:
- mod_timer(&health->timer, get_next_poll_jiffies());
+ mod_timer(&health->timer, get_next_poll_jiffies(dev));
}
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
+ u64 poll_interval_ms = mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL);
struct mlx5_core_health *health = &dev->priv.health;
timer_setup(&health->timer, poll_health, 0);
@@ -753,7 +750,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
- health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
+ health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 0c8594c7df21..ee0eb4a4b819 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -33,6 +33,11 @@
#include "en.h"
#include "ipoib.h"
+static u32 mlx5i_flow_type_mask(u32 flow_type)
+{
+ return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+}
+
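
mlx5i_flow_type_mask() strips the FLOW_EXT/FLOW_MAC_EXT/FLOW_RSS flag bits that ethtool ORs into flow_type before the base type is compared, e.g. against ETHER_FLOW in the set_rxnfc path below. A standalone illustration (the numeric values mirror uapi ethtool.h but are written out here as stand-ins):

#include <stdio.h>

#define FLOW_EXT	0x80000000u
#define FLOW_MAC_EXT	0x40000000u
#define FLOW_RSS	0x20000000u
#define ETHER_FLOW	0x12u

static unsigned int flow_type_mask(unsigned int flow_type)
{
	return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}

int main(void)
{
	unsigned int ft = ETHER_FLOW | FLOW_EXT;

	printf("is ETHER_FLOW: %d\n", flow_type_mask(ft) == ETHER_FLOW); /* 1 */
	return 0;
}
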
static void mlx5i_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -217,6 +222,27 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_MLX5_EN_RXNFC
+static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+
+ if (mlx5i_flow_type_mask(fs->flow_type) == ETHER_FLOW)
+ return -EINVAL;
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
+}
+
+static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
+}
+#endif
+
const struct ethtool_ops mlx5i_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -233,6 +259,10 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+#ifdef CONFIG_MLX5_EN_RXNFC
+ .get_rxnfc = mlx5i_get_rxnfc,
+ .set_rxnfc = mlx5i_set_rxnfc,
+#endif
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 269ebb53eda6..3b8d8ada1a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -219,7 +219,7 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- unsigned char *dev_addr = priv->netdev->dev_addr;
+ const unsigned char *dev_addr = priv->netdev->dev_addr;
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -336,6 +336,8 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_arfs_tables;
}
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
err_destroy_arfs_tables:
@@ -348,6 +350,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d2105c1635c3..48d2ea690d7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -38,7 +38,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
-#include "lag_mp.h"
+#include "mp.h"
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -47,16 +47,21 @@
static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
- u8 remap_port2, bool shared_fdb)
+ u8 remap_port2, bool shared_fdb, u8 flags)
{
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
+ if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+ } else {
+ MLX5_SET(lagc, lag_ctx, port_select_mode,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
+ }
return mlx5_cmd_exec_in(dev, create_lag, in);
}
@@ -199,6 +204,15 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port1 = 2;
}
+static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+
+ if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
+ return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
+ return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -211,39 +225,56 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+ err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ return;
+ }
ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
-
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
-
- err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
- if (err)
- mlx5_core_err(dev0,
- "Failed to modify LAG (%d)\n",
- err);
}
}
+static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker, u8 *flags)
+{
+ bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
+ struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+
+ if (roce_lag ||
+ !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
+ tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return;
+ *flags |= MLX5_LAG_FLAG_HASH_BASED;
+}
+
+static char *get_str_port_sel_mode(u8 flags)
+{
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ return "hash";
+ return "queue_affinity";
+}
+
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- bool shared_fdb)
+ bool shared_fdb, u8 flags)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
- &ldev->v2p_map[MLX5_LAG_P2]);
-
- mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d",
+ mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
- shared_fdb);
+ shared_fdb, get_str_port_sel_mode(flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2], shared_fdb);
+ ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -279,16 +310,32 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- err = mlx5_create_lag(ldev, tracker, shared_fdb);
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+ &ldev->v2p_map[MLX5_LAG_P2]);
+ mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
+ if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
+ ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to create LAG port selection(%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
if (err) {
- if (roce_lag) {
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ mlx5_lag_port_sel_destroy(ldev);
+ if (roce_lag)
mlx5_core_err(dev0,
"Failed to activate RoCE LAG\n");
- } else {
+ else
mlx5_core_err(dev0,
"Failed to activate VF LAG\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
- }
return err;
}
@@ -302,6 +349,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
+ u8 flags = ldev->flags;
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
@@ -324,6 +372,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
+ } else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ mlx5_lag_port_sel_destroy(ldev);
}
return err;
@@ -592,8 +642,10 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
if (!(bond_status & 0x3))
return 0;
- if (lag_upper_info)
+ if (lag_upper_info) {
tracker->tx_type = lag_upper_info->tx_type;
+ tracker->hash_type = lag_upper_info->hash_type;
+ }
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
@@ -692,7 +744,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev,
struct net_device *netdev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
@@ -722,7 +774,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index d4bae528954e..e5d231c31b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -5,7 +5,8 @@
#define __MLX5_LAG_H__
#include "mlx5_core.h"
-#include "lag_mp.h"
+#include "mp.h"
+#include "port_sel.h"
enum {
MLX5_LAG_P1,
@@ -17,10 +18,12 @@ enum {
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
MLX5_LAG_FLAG_READY = 1 << 3,
+ MLX5_LAG_FLAG_HASH_BASED = 1 << 4,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
- MLX5_LAG_FLAG_MULTIPATH)
+ MLX5_LAG_FLAG_MULTIPATH | \
+ MLX5_LAG_FLAG_HASH_BASED)
struct lag_func {
struct mlx5_core_dev *dev;
@@ -32,6 +35,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
+ enum netdev_lag_hash hash_type;
};
/* LAG data of a ConnectX card.
@@ -49,6 +53,7 @@ struct mlx5_lag {
struct delayed_work bond_work;
struct notifier_block nb;
struct lag_mp lag_mp;
+ struct mlx5_lag_port_sel port_sel;
};
static inline struct mlx5_lag *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 21fdaf708f1f..bf4d3cbefa63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -3,8 +3,8 @@
#include <linux/netdevice.h>
#include <net/nexthop.h>
-#include "lag.h"
-#include "lag_mp.h"
+#include "lag/lag.h"
+#include "lag/mp.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lib/mlx5.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
index dea199e79bed..dea199e79bed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
new file mode 100644
index 000000000000..adc836b3d857
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/netdevice.h>
+#include "lag.h"
+
+enum {
+ MLX5_LAG_FT_LEVEL_TTC,
+ MLX5_LAG_FT_LEVEL_INNER_TTC,
+ MLX5_LAG_FT_LEVEL_DEFINER,
+};
+
+static struct mlx5_flow_group *
+mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_definer *definer)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_definer_id,
+ mlx5_get_match_definer_id(definer));
+ MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
+ MLX5_SET(create_flow_group_in, in, group_type,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
+
+ fg = mlx5_create_flow_group(ft, in);
+ kvfree(in);
+ return fg;
+}
+
+static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer,
+ u8 port1, u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_namespace *ns;
+ int err, i;
+
+ ft_attr.max_fte = MLX5_MAX_PORTS;
+ ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
+ if (!ns) {
+ mlx5_core_warn(dev, "Failed to get port selection namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(lag_definer->ft)) {
+ mlx5_core_warn(dev, "Failed to create port selection table\n");
+ return PTR_ERR(lag_definer->ft);
+ }
+
+ lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
+ lag_definer->definer);
+ if (IS_ERR(lag_definer->fg)) {
+ err = PTR_ERR(lag_definer->fg);
+ goto destroy_ft;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ u8 affinity = i == 0 ? port1 : port2;
+
+ dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
+ vhca_id);
+ lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
+ NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(lag_definer->rules[i])) {
+ err = PTR_ERR(lag_definer->rules[i]);
+ while (i--)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ goto destroy_fg;
+ }
+ }
+
+ return 0;
+
+destroy_fg:
+ mlx5_destroy_flow_group(lag_definer->fg);
+destroy_ft:
+ mlx5_destroy_flow_table(lag_definer->ft);
+ return err;
+}
+
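
The flow group created above is of type HASH_SPLIT: instead of matching, the device hashes the fields enabled in the match definer and uses the result to select one of the group's MLX5_MAX_PORTS entries, each forwarding to one uplink. A toy two-port model of that selection (the modulo stands in for the device's hash-to-entry mapping):

#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 2

static unsigned int pick_entry(uint32_t pkt_hash)
{
	return pkt_hash % NUM_PORTS;	/* entry i forwards to port i's uplink */
}

int main(void)
{
	uint32_t flows[] = { 0x1234, 0x1235, 0xbeef, 0xbeee };

	for (unsigned int i = 0; i < 4; i++)
		printf("flow hash 0x%04x -> entry %u\n",
		       flows[i], pick_entry(flows[i]));
	return 0;
}
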
+static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt)
+{
+ int format_id;
+ u8 *ipv6;
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 31;
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 32;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ default:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static int mlx5_lag_set_definer(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt, bool tunnel,
+ enum netdev_lag_hash hash)
+{
+ int format_id;
+ u8 *ipv6;
+
+ if (tunnel)
+ return mlx5_lag_set_definer_inner(match_definer_mask, tt);
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 29;
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 30;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_15_0);
+ break;
+ default:
+ format_id = 0;
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_15_0);
+
+ if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
+ MLX5_SET_TO_ONES(match_definer_format_0,
+ match_definer_mask,
+ outer_first_vlan_vid);
+ break;
+ }
+
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_ethertype);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static struct mlx5_lag_definer *
+mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
+ enum mlx5_traffic_types tt, bool tunnel, u8 port1,
+ u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_definer *lag_definer;
+ u32 *match_definer_mask;
+ int format_id, err;
+
+ lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
+ if (!lag_definer)
+ return ERR_PTR(-ENOMEM);
+
+ match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
+ match_mask),
+ GFP_KERNEL);
+ if (!match_definer_mask) {
+ err = -ENOMEM;
+ goto free_lag_definer;
+ }
+
+ format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
+ lag_definer->definer =
+ mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
+ format_id, match_definer_mask);
+ if (IS_ERR(lag_definer->definer)) {
+ err = PTR_ERR(lag_definer->definer);
+ goto free_mask;
+ }
+
+ err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
+ if (err)
+ goto destroy_match_definer;
+
+ kvfree(match_definer_mask);
+
+ return lag_definer;
+
+destroy_match_definer:
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+free_mask:
+ kvfree(match_definer_mask);
+free_lag_definer:
+ kfree(lag_definer);
+ return ERR_PTR(err);
+}
+
+static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ mlx5_destroy_flow_group(lag_definer->fg);
+ mlx5_destroy_flow_table(lag_definer->ft);
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+ kfree(lag_definer);
+}
+
+static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int tt;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ if (port_sel->outer.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->outer.definers[tt]);
+ if (port_sel->inner.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->inner.definers[tt]);
+ }
+}
+
+static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_lag_definer *lag_definer;
+ int tt, err;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
+ false, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->outer.definers[tt] = lag_definer;
+
+ if (!port_sel->tunnel)
+ continue;
+
+ lag_definer =
+ mlx5_lag_create_definer(ldev, hash_type, tt,
+ true, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->inner.definers[tt] = lag_definer;
+ }
+
+ return 0;
+
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
+
+static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
+ enum netdev_lag_hash hash)
+{
+ port_sel->tunnel = false;
+
+ switch (hash) {
+ case NETDEV_LAG_HASH_E34:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L34:
+ set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ case NETDEV_LAG_HASH_E23:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L23:
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ default:
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ }
+}
+
+#define SET_IGNORE_DESTS_BITS(tt_map, dests) \
+ do { \
+ int idx; \
+ \
+ for_each_clear_bit(idx, tt_map, MLX5_NUM_TT) \
+ set_bit(idx, dests); \
+ } while (0)
+
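
SET_IGNORE_DESTS_BITS computes the complement of tt_map: every traffic type not selected for this hash policy has its bit set in ignore_dests, and the shared TTC code (see the fs_ttc.c hunk at the end) skips rule creation for those types. A word-sized sketch of the clear-bit walk:

#include <stdio.h>

#define NUM_TT 11			/* stand-in for MLX5_NUM_TT */

int main(void)
{
	unsigned int tt_map = (1u << 0) | (1u << 3);	/* two TTs selected */
	unsigned int ignore = 0;

	for (unsigned int tt = 0; tt < NUM_TT; tt++)	/* for_each_clear_bit */
		if (!(tt_map & (1u << tt)))
			ignore |= 1u << tt;

	printf("tt_map=0x%03x ignore=0x%03x\n", tt_map, ignore);
	return 0;
}
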
+static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+}
+
+static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+
+ ttc_params->inner_ttc = port_sel->tunnel;
+ if (!port_sel->tunnel)
+ return;
+
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ ttc_params->tunnel_dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->tunnel_dests[tt].ft =
+ mlx5_get_ttc_flow_table(port_sel->inner.ttc);
+ }
+}
+
+static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
+ port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->outer.ttc))
+ return PTR_ERR(port_sel->outer.ttc);
+
+ return 0;
+}
+
+static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
+ port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->inner.ttc))
+ return PTR_ERR(port_sel->inner.ttc);
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ set_tt_map(port_sel, hash_type);
+ err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
+ if (err)
+ return err;
+
+ if (port_sel->tunnel) {
+ err = mlx5_lag_create_inner_ttc_table(ldev);
+ if (err)
+ goto destroy_definers;
+ }
+
+ err = mlx5_lag_create_ttc_table(ldev);
+ if (err)
+ goto destroy_inner;
+
+ return 0;
+
+destroy_inner:
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
+
+static int
+mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer **definers,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_destination dest = {};
+ int err;
+ int tt;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ struct mlx5_flow_handle **rules = definers[tt]->rules;
+
+ if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+
+ if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ err = mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->outer.definers,
+ port1, port2);
+ if (err)
+ return err;
+
+ if (!port_sel->tunnel)
+ return 0;
+
+ return mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->inner.definers,
+ port1, port2);
+}
+
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+
+ mlx5_destroy_ttc_table(port_sel->outer.ttc);
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+ mlx5_lag_destroy_definers(ldev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
new file mode 100644
index 000000000000..6d15b28a42fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LAG_FS_H__
+#define __MLX5_LAG_FS_H__
+
+#include "lib/fs_ttc.h"
+
+struct mlx5_lag_definer {
+ struct mlx5_flow_definer *definer;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
+};
+
+struct mlx5_lag_ttc {
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_lag_definer *definers[MLX5_NUM_TT];
+};
+
+struct mlx5_lag_port_sel {
+ DECLARE_BITMAP(tt_map, MLX5_NUM_TT);
+ bool tunnel;
+ struct mlx5_lag_ttc outer;
+ struct mlx5_lag_ttc inner;
+};
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2);
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1,
+ u8 port2);
+
+#else /* CONFIG_MLX5_ESWITCH */
+static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ return 0;
+}
+
+static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1,
+ u8 port2)
+{
+ return 0;
+}
+
+static inline void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) {}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_LAG_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 749d17c0057d..b63dec24747a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -247,6 +247,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (test_bit(tt, params->ignore_dests))
+ continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -266,6 +268,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
if (!mlx5_tunnel_proto_supported_rx(dev,
ttc_tunnel_rules[tt].proto))
continue;
+ if (test_bit(tt, params->ignore_tunnel_dests))
+ continue;
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index ce95be8f8382..85fef0cd1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -43,7 +43,9 @@ struct ttc_params {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
+ DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
bool inner_ttc;
+ DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
new file mode 100644
index 000000000000..0dd96a6b140d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/driver.h>
+#include "lib/tout.h"
+
+struct mlx5_timeouts {
+ u64 to[MAX_TIMEOUT_TYPES];
+};
+
+static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
+ [MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000,
+ [MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000,
+ [MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2,
+ [MLX5_TO_FW_INIT_MS] = 2000,
+ [MLX5_TO_CMD_MS] = 60000,
+ [MLX5_TO_PCI_TOGGLE_MS] = 2000,
+ [MLX5_TO_HEALTH_POLL_INTERVAL_MS] = 2000,
+ [MLX5_TO_FULL_CRDUMP_MS] = 60000,
+ [MLX5_TO_FW_RESET_MS] = 60000,
+ [MLX5_TO_FLUSH_ON_ERROR_MS] = 2000,
+ [MLX5_TO_PCI_SYNC_UPDATE_MS] = 5000,
+ [MLX5_TO_TEARDOWN_MS] = 3000,
+ [MLX5_TO_FSM_REACTIVATE_MS] = 5000,
+ [MLX5_TO_RECLAIM_PAGES_MS] = 5000,
+ [MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000
+};
+
+static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type)
+{
+ dev->timeouts->to[type] = val;
+}
+
+static void tout_set_def_val(struct mlx5_core_dev *dev)
+{
+ int i;
+
+ for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+}
+
+int mlx5_tout_init(struct mlx5_core_dev *dev)
+{
+ dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
+ if (!dev->timeouts)
+ return -ENOMEM;
+
+ tout_set_def_val(dev);
+ return 0;
+}
+
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev)
+{
+ kfree(dev->timeouts);
+}
+
+/* The timeout register consists of two fields: to_multiplier (timeout
+ * multiplier) and to_value (timeout value). to_value is the quantity of
+ * time units and to_multiplier selects the unit, one of these four values:
+ * 0x0: milliseconds
+ * 0x1: seconds
+ * 0x2: minutes
+ * 0x3: hours
+ * This function converts the time stored in the two register fields into
+ * milliseconds.
+ */
+static u64 tout_convert_reg_field_to_ms(u32 to_mul, u32 to_val)
+{
+ u64 msec = to_val;
+
+ to_mul &= 0x3;
+ /* convert hours/minutes/seconds to milliseconds */
+ if (to_mul)
+ msec *= 1000 * int_pow(60, to_mul - 1);
+
+ return msec;
+}
+
+static u64 tout_convert_iseg_to_ms(u32 iseg_to)
+{
+ return tout_convert_reg_field_to_ms(iseg_to >> 29, iseg_to & 0xfffff);
+}
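
Putting the two helpers together, a standalone sketch of the decode (pow_u64() is a local stand-in for the kernel's int_pow(), and the register word is a hypothetical example value): a word with to_multiplier = 0x2 (minutes) and to_value = 3 comes out as 180000 ms.

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's int_pow() */
static uint64_t pow_u64(uint64_t base, unsigned int exp)
{
	uint64_t r = 1;

	while (exp--)
		r *= base;
	return r;
}

int main(void)
{
	uint32_t iseg_to = (0x2u << 29) | 3;	/* hypothetical register word */
	uint32_t mul = (iseg_to >> 29) & 0x3;	/* top bits: unit selector */
	uint64_t msec = iseg_to & 0xfffff;	/* low 20 bits: quantity */

	if (mul)
		msec *= 1000 * pow_u64(60, mul - 1);
	printf("%llu ms\n", (unsigned long long)msec);	/* 180000 */
	return 0;
}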
+
+static bool tout_is_supported(struct mlx5_core_dev *dev)
+{
+ return !!ioread32be(&dev->iseg->cmd_q_init_to);
+}
+
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev)
+{
+ u32 to;
+
+ if (!tout_is_supported(dev))
+ return;
+
+ to = ioread32be(&dev->iseg->cmd_q_init_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_FW_INIT_MS);
+
+ to = ioread32be(&dev->iseg->cmd_exec_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_CMD_MS);
+}
+
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type)
+{
+ return dev->timeouts->to[type];
+}
+
+#define MLX5_TIMEOUT_QUERY(fld, reg_out) \
+ ({ \
+ struct mlx5_ifc_default_timeout_bits *time_field; \
+ u32 to_multi, to_value; \
+ u64 to_val_ms; \
+ \
+ time_field = MLX5_ADDR_OF(dtor_reg, reg_out, fld); \
+ to_multi = MLX5_GET(default_timeout, time_field, to_multiplier); \
+ to_value = MLX5_GET(default_timeout, time_field, to_value); \
+ to_val_ms = tout_convert_reg_field_to_ms(to_multi, to_value); \
+ to_val_ms; \
+ })
+
+#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \
+ ({ \
+ u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
+ tout_set(dev, fw_to + (to_extra), to_type); \
+ fw_to; \
+ })
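
Both macros lean on the GCC/Clang statement-expression extension: a ({ ... }) block evaluates to its last expression, which is how MLX5_TIMEOUT_QUERY() "returns" to_val_ms. A two-line illustration of the extension:

#include <stdio.h>

int main(void)
{
	/* the block's value is its last expression, a * b */
	int x = ({ int a = 2; int b = 3; a * b; });

	printf("%d\n", x);	/* 6 */
	return 0;
}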
+
+static int tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ u64 pcie_toggle_to_val, tear_down_to_val;
+ u32 out[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_DTOR, 0, 0);
+ if (err)
+ return err;
+
+ pcie_toggle_to_val = MLX5_TIMEOUT_FILL(pcie_toggle_to, out, dev, MLX5_TO_PCI_TOGGLE_MS, 0);
+ MLX5_TIMEOUT_FILL(fw_reset_to, out, dev, MLX5_TO_FW_RESET_MS, pcie_toggle_to_val);
+
+ tear_down_to_val = MLX5_TIMEOUT_FILL(tear_down_to, out, dev, MLX5_TO_TEARDOWN_MS, 0);
+ MLX5_TIMEOUT_FILL(pci_sync_update_to, out, dev, MLX5_TO_PCI_SYNC_UPDATE_MS,
+ tear_down_to_val);
+
+ MLX5_TIMEOUT_FILL(health_poll_to, out, dev, MLX5_TO_HEALTH_POLL_INTERVAL_MS, 0);
+ MLX5_TIMEOUT_FILL(full_crdump_to, out, dev, MLX5_TO_FULL_CRDUMP_MS, 0);
+ MLX5_TIMEOUT_FILL(flush_on_err_to, out, dev, MLX5_TO_FLUSH_ON_ERROR_MS, 0);
+ MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0);
+
+ return 0;
+}
+
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ if (tout_is_supported(dev))
+ return tout_query_dtor(dev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
new file mode 100644
index 000000000000..31faa5c17aa9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef MLX5_TIMEOUTS_H
+#define MLX5_TIMEOUTS_H
+
+enum mlx5_timeouts_types {
+ /* pre init timeouts (not read from FW) */
+ MLX5_TO_FW_PRE_INIT_TIMEOUT_MS,
+ MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS,
+ MLX5_TO_FW_PRE_INIT_WAIT_MS,
+
+ /* init segment timeouts */
+ MLX5_TO_FW_INIT_MS,
+ MLX5_TO_CMD_MS,
+
+ /* DTOR timeouts */
+ MLX5_TO_PCI_TOGGLE_MS,
+ MLX5_TO_HEALTH_POLL_INTERVAL_MS,
+ MLX5_TO_FULL_CRDUMP_MS,
+ MLX5_TO_FW_RESET_MS,
+ MLX5_TO_FLUSH_ON_ERROR_MS,
+ MLX5_TO_PCI_SYNC_UPDATE_MS,
+ MLX5_TO_TEARDOWN_MS,
+ MLX5_TO_FSM_REACTIVATE_MS,
+ MLX5_TO_RECLAIM_PAGES_MS,
+ MLX5_TO_RECLAIM_VFS_PAGES_MS,
+
+ MAX_TIMEOUT_TYPES
+};
+
+struct mlx5_core_dev;
+int mlx5_tout_init(struct mlx5_core_dev *dev);
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
+
+#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
+
+#endif /* MLX5_TIMEOUTS_H */
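
The mlx5_tout_ms() wrapper relies on token pasting: MLX5_TO_##type##_MS glues the caller's short name into the full enumerator, so mlx5_tout_ms(dev, FW_INIT) indexes MLX5_TO_FW_INIT_MS. A minimal sketch of the same trick (the names below are illustrative, not the driver's):

#include <stdio.h>

enum { TO_FW_INIT_MS, TO_CMD_MS, MAX_TO };

static unsigned long long touts[MAX_TO] = { 2000, 60000 };

/* ## pastes tokens: tout_ms(FW_INIT) becomes touts[TO_FW_INIT_MS] */
#define tout_ms(type) touts[TO_##type##_MS]

int main(void)
{
	printf("%llu\n", tout_ms(FW_INIT));	/* 2000 */
	return 0;
}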
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 79482824c64f..f8446395163a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -60,6 +60,7 @@
#include "devlink.h"
#include "fw_reset.h"
#include "lib/mlx5.h"
+#include "lib/tout.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
@@ -176,11 +177,6 @@ static struct mlx5_profile profile[] = {
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
-#define FW_PRE_INIT_TIMEOUT_MILI 120000
-#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
-
static int fw_initializing(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->initializing) >> 31;
@@ -193,8 +189,6 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
int err = 0;
- BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);
-
while (fw_initializing(dev)) {
if (time_after(jiffies, end)) {
err = -EBUSY;
@@ -205,7 +199,7 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
jiffies_to_msecs(end - warn) / 1000);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
- msleep(FW_INIT_WAIT_MS);
+ msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
}
return err;
@@ -564,15 +558,38 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
+/* Cached MLX5_CAP_GEN(dev, roce) can be out of sync this early in the
+ * boot process.
+ * In case RoCE cap is writable in FW and user/devlink requested to change the
+ * cap, we are yet to query the final state of the above cap.
+ * Hence, the need for this function.
+ *
+ * Returns
+ * True:
+ * 1) RoCE cap is read-only in FW and already disabled
+ * OR:
+ * 2) RoCE cap is writable in FW and user/devlink requested it off.
+ *
+ * In any other case, return False.
+ */
+static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
+}
+
static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
int err;
- if (!MLX5_CAP_GEN(dev, roce))
+ if (is_roce_fw_disabled(dev))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
@@ -975,25 +992,34 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
+ err = mlx5_tout_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+ return err;
+ }
+
/* wait for firmware to accept initialization segments configurations
*/
- err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT),
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
- FW_PRE_INIT_TIMEOUT_MILI);
- return err;
+ mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ goto err_tout_cleanup;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
- return err;
+ goto err_tout_cleanup;
}
- err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
+ mlx5_tout_query_iseg(dev);
+
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
- FW_INIT_TIMEOUT_MILI);
+ mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_INIT));
goto err_cmd_cleanup;
}
@@ -1017,6 +1043,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
goto err_disable_hca;
}
+ err = mlx5_tout_query_dtor(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to read dtor\n");
+ goto reclaim_boot_pages;
+ }
+
err = set_hca_ctrl(dev);
if (err) {
mlx5_core_err(dev, "set_hca_ctrl failed\n");
@@ -1062,6 +1094,8 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+err_tout_cleanup:
+ mlx5_tout_cleanup(dev);
return err;
}
@@ -1080,6 +1114,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+ mlx5_tout_cleanup(dev);
return 0;
}
@@ -1112,8 +1147,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
- mlx5_core_err(dev, "Failed to init FW tracer\n");
- goto err_fw_tracer;
+ mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
+ mlx5_fw_tracer_destroy(dev->tracer);
+ dev->tracer = NULL;
}
mlx5_fw_reset_events_start(dev);
@@ -1121,8 +1157,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_rsc_dump_init(dev);
if (err) {
- mlx5_core_err(dev, "Failed to init Resource dump\n");
- goto err_rsc_dump;
+ mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
+ mlx5_rsc_dump_destroy(dev);
+ dev->rsc_dump = NULL;
}
err = mlx5_fpga_device_start(dev);
@@ -1192,11 +1229,9 @@ err_tls_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
-err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
-err_fw_tracer:
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@@ -1381,6 +1416,7 @@ static const int types[] = {
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
+ MLX5_CAP_PORT_SELECTION,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1537,8 +1573,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
pci_save_state(pdev);
- if (!mlx5_core_is_mp_slave(dev))
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
err_init_one:
@@ -1558,7 +1593,7 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index abd024173c42..8116815663a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -8,8 +8,6 @@
#define MLX5_COMP_EQS_PER_SF 8
-#define MLX5_IRQ_EQ_CTRL (0)
-
struct mlx5_irq;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 110c0837f95b..f6b5451328fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -38,6 +38,7 @@
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
@@ -65,11 +66,6 @@ struct fw_page {
};
enum {
- MAX_RECLAIM_TIME_MSECS = 5000,
- MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
-};
-
-enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
@@ -641,7 +637,8 @@ static int optimal_reclaimed_pages(void)
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
struct rb_root *root, u16 func_id)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
+ unsigned long end = jiffies + recl_pages_to_jiffies;
while (!RB_EMPTY_ROOT(root)) {
int nclaimed;
@@ -656,7 +653,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
}
if (nclaimed)
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ end = jiffies + recl_pages_to_jiffies;
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
@@ -727,7 +724,8 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
+ unsigned long end = jiffies + recl_vf_pages_to_jiffies;
int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
@@ -743,7 +741,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return -ETIMEDOUT;
}
if (*pages < prev_pages) {
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ end = jiffies + recl_vf_pages_to_jiffies;
prev_pages = *pages;
}
msleep(50);
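
Both reclaim loops above use a sliding deadline: the timeout window is re-armed whenever forward progress is observed, so only a genuine stall trips it. A small sketch of the pattern, with wall-clock time standing in for jiffies:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t end = time(NULL) + 5;	/* initial budget */
	int pages = 3;

	while (pages > 0) {
		if (time(NULL) > end) {
			puts("giving up");
			return 1;
		}
		pages--;		/* progress observed... */
		end = time(NULL) + 5;	/* ...so the deadline is re-armed */
	}
	puts("all pages reclaimed");
	return 0;
}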
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 763c83a02380..830444f927d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -194,15 +194,25 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}
-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
- if (vecidx == 0) {
+ if (!pool->xa_num_irqs.max) {
+ /* in case we only have a single irq for the device */
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
+ return;
+ }
+
+ if (vecidx == pool->xa_num_irqs.max) {
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
return;
}
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
- vecidx - MLX5_IRQ_VEC_COMP_BASE);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
+}
+
+static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+ return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
}
static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -216,8 +226,8 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
if (!irq)
return ERR_PTR(-ENOMEM);
irq->irqn = pci_irq_vector(dev->pdev, i);
- if (!pool->name[0])
- irq_set_name(name, i);
+ if (!irq_pool_is_sf_pool(pool))
+ irq_set_name(pool, name, i);
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -386,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
if (IS_ERR(irq) || !affinity)
goto unlock;
cpumask_copy(irq->mask, affinity);
+ if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
+ cpumask_empty(irq->mask))
+ cpumask_set_cpu(0, irq->mask);
irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
mutex_unlock(&pool->lock);
@@ -440,6 +453,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
}
pf_irq:
pool = irq_table->pf_pool;
+ vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
irq = irq_pool_request_vector(pool, vecidx, affinity);
out:
if (IS_ERR(irq))
@@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
+ if (!table->pf_pool->xa_num_irqs.max)
+ return 1;
return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}
@@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
- pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_IRQ_VEC_COMP_BASE;
+ pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
pf_vec = min_t(int, pf_vec, num_eqs);
- if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
- return -ENOMEM;
total_vec = pf_vec;
if (mlx5_sf_max_functions(dev))
total_vec += MLX5_IRQ_CTRL_SF_MAX +
MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
- total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
- total_vec, PCI_IRQ_MSIX);
+ total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
if (total_vec < 0)
return total_vec;
pf_vec = min(pf_vec, total_vec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 052f48068dc1..7b4783ce213e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -46,7 +46,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
}
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
init_one_err:
@@ -61,10 +61,9 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink;
+ struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
- devlink = priv_to_devlink(sf_dev->mdev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 13891fdc607e..e1bb3acf45e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -323,7 +323,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
+ if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index a5b9f65db23c..07936841ce99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -39,6 +39,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
[DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
[DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
+ [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
@@ -513,9 +514,9 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
/* If destination is vport we will get the FW flow table
* that recalculates the CS and forwards to the vport.
*/
- ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
- dest_action->vport->caps->num,
- final_icm_addr);
+ ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+ dest_action->vport->caps->num,
+ final_icm_addr);
if (ret) {
mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
return ret;
@@ -632,7 +633,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
return -EOPNOTSUPP;
case DR_ACTION_TYP_CTR:
attr.ctr_id = action->ctr->ctr_id +
- action->ctr->offeset;
+ action->ctr->offset;
break;
case DR_ACTION_TYP_TAG:
attr.flow_tag = action->flow_tag->flow_tag;
@@ -669,7 +670,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
if (rx_rule) {
- if (action->vport->caps->num == WIRE_PORT) {
+ if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
return -EOPNOTSUPP;
}
@@ -853,6 +854,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action;
bool reformat_req = false;
u32 num_of_ref = 0;
+ u32 ref_act_cnt;
int ret;
int i;
@@ -861,11 +863,14 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
return NULL;
}
- hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
+ hw_dests = kcalloc(num_of_dests, sizeof(*hw_dests), GFP_KERNEL);
if (!hw_dests)
return NULL;
- ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
+ if (unlikely(check_mul_overflow(num_of_dests, 2u, &ref_act_cnt)))
+ goto free_hw_dests;
+
+ ref_actions = kcalloc(ref_act_cnt, sizeof(*ref_actions), GFP_KERNEL);
if (!ref_actions)
goto free_hw_dests;
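
The hunk above swaps the open-coded sizeof(*p) * n allocations for kcalloc() and guards the num_of_dests * 2 count with check_mul_overflow(). On GCC/Clang that check boils down to __builtin_mul_overflow(), sketched here in userspace with calloc() in kcalloc()'s place:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int num_of_dests = 5, ref_act_cnt;

	/* returns true (and we bail out) if the product overflows */
	if (__builtin_mul_overflow(num_of_dests, 2u, &ref_act_cnt))
		return 1;

	void *ref_actions = calloc(ref_act_cnt, sizeof(void *));

	printf("%u slots at %p\n", ref_act_cnt, ref_actions);
	free(ref_actions);
	return 0;
}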
@@ -1747,7 +1752,7 @@ dec_ref:
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id)
{
struct mlx5dr_cmd_vport_cap *vport_cap;
@@ -1767,9 +1772,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return NULL;
}
- vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport);
if (!vport_cap) {
- mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport);
+ mlx5dr_err(dmn,
+ "Failed to get vport 0x%x caps - vport is disabled or invalid\n",
+ vport);
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 56307283bf9b..1d8febed0d76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -195,6 +195,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
return 0;
}
@@ -272,7 +274,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id)
+ u16 vport)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
void *in_flow_context;
@@ -303,7 +305,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format_struct, in_dests, destination_type,
MLX5_FLOW_DESTINATION_TYPE_VPORT);
- MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 0fe159809ba1..49089cbe897c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -9,48 +9,45 @@
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
(dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
-static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
- * recalculation is needed due to a HW bug.
+ * recalculation is needed due to a HW bug in STEv0.
*/
- dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
- sizeof(dmn->cache.recalc_cs_ft[0]),
- GFP_KERNEL);
- if (!dmn->cache.recalc_cs_ft)
- return -ENOMEM;
-
- return 0;
+ xa_init(&dmn->csum_fts_xa);
}
-static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
- int i;
-
- for (i = 0; i < dmn->info.caps.num_vports; i++) {
- if (!dmn->cache.recalc_cs_ft[i])
- continue;
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ unsigned long i;
- mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
+ xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
+ if (recalc_cs_ft)
+ mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
}
- kfree(dmn->cache.recalc_cs_ft);
+ xa_destroy(&dmn->csum_fts_xa);
}
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr)
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr)
{
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ int ret;
- recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
+ recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
if (!recalc_cs_ft) {
- /* Table not in cache, need to allocate a new one */
+ /* Table hasn't been created yet */
recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
if (!recalc_cs_ft)
return -EINVAL;
- dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
+ ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
+ recalc_cs_ft, GFP_KERNEL));
+ if (ret)
+ return ret;
}
*rx_icm_addr = recalc_cs_ft->rx_icm_addr;
@@ -124,18 +121,39 @@ static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
+static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_vport_cap *uplink_vport)
+{
+ struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+
+ uplink_vport->num = MLX5_VPORT_UPLINK;
+ uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+ uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+ uplink_vport->vport_gvmi = 0;
+ uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
+}
+
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
- bool other_vport,
- u16 vport_number)
+ u16 vport_number,
+ struct mlx5dr_cmd_vport_cap *vport_caps)
{
- struct mlx5dr_cmd_vport_cap *vport_caps;
+ u16 cmd_vport = vport_number;
+ bool other_vport = true;
int ret;
- vport_caps = &dmn->info.caps.vports_caps[vport_number];
+ if (vport_number == MLX5_VPORT_UPLINK) {
+ dr_domain_fill_uplink_caps(dmn, vport_caps);
+ return 0;
+ }
+
+ if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
+ other_vport = false;
+ cmd_vport = 0;
+ }
ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->icm_address_rx,
&vport_caps->icm_address_tx);
if (ret)
@@ -143,7 +161,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->vport_gvmi);
if (ret)
return ret;
@@ -154,27 +172,82 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
return 0;
}
-static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
+static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
- struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
- struct mlx5dr_cmd_vport_cap *wire_vport;
- int vport;
+ return dr_domain_query_vport(dmn,
+ dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
+ &dmn->info.caps.vports.esw_manager_caps);
+}
+
+static struct mlx5dr_cmd_vport_cap *
+dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
int ret;
- /* Query vports (except wire vport) */
- for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
- ret = dr_domain_query_vport(dmn, !!vport, vport);
- if (ret)
- return ret;
+ vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
+ if (!vport_caps)
+ return NULL;
+
+ ret = dr_domain_query_vport(dmn, vport, vport_caps);
+ if (ret) {
+ kvfree(vport_caps);
+ return NULL;
}
- /* Last vport is the wire port */
- wire_vport = &dmn->info.caps.vports_caps[vport];
- wire_vport->num = WIRE_PORT;
- wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
- wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
- wire_vport->vport_gvmi = 0;
- wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
+ ret = xa_insert(&caps->vports.vports_caps_xa, vport,
+ vport_caps, GFP_KERNEL);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
+ kvfree(vport_caps);
+ return ERR_PTR(ret);
+ }
+
+ return vport_caps;
+}
+
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+ (!caps->is_ecpf && vport == 0))
+ return &caps->vports.esw_manager_caps;
+
+vport_load:
+ vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
+ if (vport_caps)
+ return vport_caps;
+
+ vport_caps = dr_domain_add_vport_cap(dmn, vport);
+ if (PTR_ERR(vport_caps) == -EBUSY)
+ /* caps were already stored by another thread */
+ goto vport_load;
+
+ return vport_caps;
+}
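
mlx5dr_domain_get_vport_cap() is a lookup-or-create with a race retry: if another thread inserts the same vport between xa_load() and xa_insert(), the -EBUSY result frees the duplicate and loops back to load the winner's entry. A single-threaded toy of that shape (a plain array stands in for the xarray, which does its own locking in the real code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_VPORTS 16

static void *slots[MAX_VPORTS];	/* toy stand-in for the xarray */

/* like xa_insert(): refuses to overwrite an occupied slot */
static int slot_insert(int idx, void *val)
{
	if (slots[idx])
		return -EBUSY;
	slots[idx] = val;
	return 0;
}

static void *get_or_create(int vport)
{
	void *cap;

load:
	cap = slots[vport];		/* xa_load() */
	if (cap)
		return cap;

	cap = malloc(16);		/* build the caps entry */
	if (!cap)
		return NULL;
	if (slot_insert(vport, cap) == -EBUSY) {
		free(cap);		/* another thread won the race */
		goto load;
	}
	return cap;
}

int main(void)
{
	printf("vport 3 caps: %p\n", get_or_create(3));
	return 0;
}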
+
+static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ unsigned long i;
+
+ xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
+ vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
+ kvfree(vport_caps);
+ }
+}
+
+static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
+ if (!vport_caps)
+ return -EINVAL;
return 0;
}
@@ -196,25 +269,29 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
- dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
- sizeof(dmn->info.caps.vports_caps[0]),
- GFP_KERNEL);
- if (!dmn->info.caps.vports_caps)
- return -ENOMEM;
+ xa_init(&dmn->info.caps.vports.vports_caps_xa);
- ret = dr_domain_query_vports(dmn);
+ /* Query eswitch manager and uplink vports only. Rest of the
+ * vports (vport 0, VFs and SFs) will be queried dynamically.
+ */
+
+ ret = dr_domain_query_esw_mngr(dmn);
if (ret) {
- mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
- goto free_vports_caps;
+ mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
}
- dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
+ ret = dr_domain_query_uplink(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
+ }
return 0;
-free_vports_caps:
- kfree(dmn->info.caps.vports_caps);
- dmn->info.caps.vports_caps = NULL;
+free_vports_caps_xa:
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
+
return ret;
}
@@ -229,8 +306,6 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
}
- dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
-
ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
if (ret)
return ret;
@@ -267,11 +342,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
- vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
- if (!vport_cap) {
- mlx5dr_err(dmn, "Failed to get esw manager vport\n");
- return -ENOENT;
- }
+ vport_cap = &dmn->info.caps.vports.esw_manager_caps;
dmn->info.supp_sw_steering = true;
dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
@@ -290,7 +361,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
- kfree(dmn->info.caps.vports_caps);
+ dr_domain_clear_vports(dmn);
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}
struct mlx5dr_domain *
@@ -333,16 +405,10 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
goto uninit_caps;
}
- ret = dr_domain_init_cache(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Failed initialize domain cache\n");
- goto uninit_resourses;
- }
+ dr_domain_init_csum_recalc_fts(dmn);
return dmn;
-uninit_resourses:
- dr_domain_uninit_resources(dmn);
uninit_caps:
dr_domain_caps_uninit(dmn);
free_domain:
@@ -381,7 +447,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
/* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev);
- dr_domain_uninit_cache(dmn);
+ dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
mutex_destroy(&dmn->info.tx.mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 0d6f86eb248b..68a4c32d5f34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -5,7 +5,7 @@
#include "dr_types.h"
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index aca80efc28fa..323ea138ad99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1042,10 +1042,10 @@ static bool dr_rule_skip(enum mlx5dr_domain_type domain,
return false;
if (mask->misc.source_port) {
- if (rx && value->misc.source_port != WIRE_PORT)
+ if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
return true;
- if (!rx && value->misc.source_port == WIRE_PORT)
+ if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 9c704bce3c12..b0649c2877dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -1645,7 +1645,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
bool source_gvmi_set;
@@ -1654,23 +1654,24 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
if (source_gvmi_set) {
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
+ misc->source_port);
if (!vport_cap) {
- mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index b2481c99da79..cb9cf67b0a02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -586,9 +586,11 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
u8 *d_action;
- dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
- action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
- action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
d_action = action + DR_STE_ACTION_SINGLE_SZ;
dr_ste_v1_set_encap_l3(last_ste,
@@ -1776,7 +1778,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
@@ -1784,22 +1786,22 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
- misc->source_eswitch_owner_vhca_id = 0;
+ misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
return 0;
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
if (!vport_cap) {
mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index b20e8aabb861..73fed94af09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -4,7 +4,7 @@
#ifndef _DR_TYPES_
#define _DR_TYPES_
-#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
@@ -14,7 +14,6 @@
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
-#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
@@ -752,9 +751,9 @@ struct mlx5dr_esw_caps {
struct mlx5dr_cmd_vport_cap {
u16 vport_gvmi;
u16 vhca_gvmi;
+ u16 num;
u64 icm_address_rx;
u64 icm_address_tx;
- u32 num;
};
struct mlx5dr_roce_cap {
@@ -763,6 +762,11 @@ struct mlx5dr_roce_cap {
u8 fl_rc_qp_when_roce_enabled:1;
};
+struct mlx5dr_vports {
+ struct mlx5dr_cmd_vport_cap esw_manager_caps;
+ struct xarray vports_caps_xa;
+};
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@ -786,7 +790,6 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
- u8 num_esw_ports;
u8 sw_format_ver;
bool eswitch_manager;
bool rx_sw_owner;
@@ -795,11 +798,11 @@ struct mlx5dr_cmd_caps {
u8 rx_sw_owner_v2:1;
u8 tx_sw_owner_v2:1;
u8 fdb_sw_owner_v2:1;
- u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
- struct mlx5dr_cmd_vport_cap *vports_caps;
+ struct mlx5dr_vports vports;
bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
+ u8 is_ecpf:1;
u8 isolate_vl_tc:1;
};
@@ -826,10 +829,6 @@ struct mlx5dr_domain_info {
struct mlx5dr_cmd_caps caps;
};
-struct mlx5dr_domain_cache {
- struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
-};
-
struct mlx5dr_domain {
struct mlx5dr_domain *peer_dmn;
struct mlx5_core_dev *mdev;
@@ -841,7 +840,7 @@ struct mlx5dr_domain {
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
- struct mlx5dr_domain_cache cache;
+ struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
};
@@ -942,7 +941,7 @@ struct mlx5dr_action_dest_tbl {
struct mlx5dr_action_ctr {
u32 ctr_id;
- u32 offeset;
+ u32 offset;
};
struct mlx5dr_action_vport {
@@ -1102,18 +1101,8 @@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
return true;
}
-static inline struct mlx5dr_cmd_vport_cap *
-mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
-{
- if (!caps->vports_caps ||
- (vport >= caps->num_vports && vport != WIRE_PORT))
- return NULL;
-
- if (vport == WIRE_PORT)
- vport = caps->num_vports;
-
- return &caps->vports_caps[vport];
-}
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
struct mlx5dr_cmd_query_flow_table_details {
u8 status;
@@ -1154,7 +1143,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id);
+ u16 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id);
@@ -1372,12 +1361,12 @@ struct mlx5dr_fw_recalc_cs_ft {
};
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr);
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 7e58f4e594b7..2632d5ae9bc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -222,7 +222,7 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -625,6 +625,19 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
+static int
+mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
@@ -727,6 +740,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_dr_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c5a8b1601999..c7c93131b762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -89,7 +89,7 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id);
struct mlx5dr_action *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 4c1440a95ad7..8846d30a380a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -421,19 +421,21 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
-
+out:
kvfree(out);
-
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
@@ -1133,19 +1135,20 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- u64 tmp = 0;
+ u64 tmp;
+ int err;
if (mdev->sys_image_guid)
return mdev->sys_image_guid;
if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
- mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
else
- mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
- mdev->sys_image_guid = tmp;
+ mdev->sys_image_guid = err ? 0 : tmp;
- return tmp;
+ return mdev->sys_image_guid;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 6704f5c1aa32..b990782c1eb1 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -75,7 +75,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
u64_to_ether_addr(local_mac, mac);
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
} else {
/* Provide a random MAC if for some reason the device has
* not been configured with a valid MAC address already.
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 7654841a05c2..e6475ea77cd1 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -19,7 +19,7 @@ struct mlxfw_dev {
static inline
struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev)
{
- return mlxfw_dev->devlink->dev;
+ return devlink_to_dev(mlxfw_dev->devlink);
}
#define MLXFW_PRFX "mlxfw: "
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f080fab3de2b..3fd3812b8f31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -90,7 +90,6 @@ struct mlxsw_core {
struct devlink_health_reporter *fw_fatal;
} health;
struct mlxsw_env *env;
- bool is_initialized; /* Denotes if core was already initialized. */
unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -1975,12 +1974,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_emad_init;
if (!reload) {
- err = devlink_register(devlink);
- if (err)
- goto err_devlink_register;
- }
-
- if (!reload) {
err = mlxsw_core_params_register(mlxsw_core);
if (err)
goto err_register_params;
@@ -1995,12 +1988,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_health_init;
- if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
- if (err)
- goto err_driver_init;
- }
-
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
goto err_hwmon_init;
@@ -2014,31 +2001,31 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_env_init;
- mlxsw_core->is_initialized = true;
- devlink_params_publish(devlink);
-
- if (!reload)
- devlink_reload_enable(devlink);
+ if (mlxsw_driver->init) {
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
+ if (err)
+ goto err_driver_init;
+ }
+ if (!reload) {
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
+ }
return 0;
+err_driver_init:
+ mlxsw_env_fini(mlxsw_core->env);
err_env_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
-err_driver_init:
mlxsw_core_health_fini(mlxsw_core);
err_health_init:
err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
- if (!reload)
- devlink_unregister(devlink);
-err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
kfree(mlxsw_core->lag.mapping);
@@ -2088,7 +2075,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (!reload)
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
+
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
@@ -2099,18 +2087,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- devlink_params_unpublish(devlink);
- mlxsw_core->is_initialized = false;
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
mlxsw_env_fini(mlxsw_core->env);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
mlxsw_core_health_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
- if (!reload)
- devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
@@ -2124,7 +2108,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
@@ -2939,49 +2922,6 @@ struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
return mlxsw_core->env;
}
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->is_initialized;
-}
-
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
-{
- enum mlxsw_reg_pmtm_module_type module_type;
- char pmtm_pl[MLXSW_REG_PMTM_LEN];
- int err;
-
- mlxsw_reg_pmtm_pack(pmtm_pl, module);
- err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
- if (err)
- return err;
- mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
-
- /* Here we need to get the module width according to the module type. */
-
- switch (module_type) {
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
- case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
- return 8;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
- return 4;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
- case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
- return 2;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
- return 1;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(mlxsw_core_module_max_width);
-
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 80712dc803d0..12023a550007 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -249,8 +249,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core);
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 3713c45cfa1e..6dd4ae2f45f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -5,6 +5,7 @@
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/sfp.h>
+#include <linux/mutex.h>
#include "core.h"
#include "core_env.h"
@@ -14,12 +15,15 @@
struct mlxsw_env_module_info {
u64 module_overheat_counter;
bool is_overheat;
+ int num_ports_mapped;
+ int num_ports_up;
+ enum ethtool_module_power_mode_policy power_mode_policy;
};
struct mlxsw_env {
struct mlxsw_core *core;
u8 module_count;
- spinlock_t module_info_lock; /* Protects 'module_info'. */
+ struct mutex module_info_lock; /* Protects 'module_info'. */
struct mlxsw_env_module_info module_info[];
};
@@ -389,6 +393,205 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 req = *flags;
+ int err;
+
+ if (!(req & ETH_RESET_PHY) &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
+ return 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].num_ports_up) {
+ netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
+ netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlxsw_env_module_reset(mlxsw_core, module);
+ if (err) {
+ netdev_err(netdev, "Failed to reset module\n");
+ goto out;
+ }
+
+ *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
+
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_reset_module);
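[Annotation, not part of the patch: a minimal sketch of how an ethtool .reset hook is expected to consume this helper. The caller passes the requested ETH_RESET_* bits in *flags, and bits still set on return are reported to userspace as not having been reset; the mlxsw_m_reset() hunk further down does exactly this. The struct and field names below are illustrative.]

static int example_reset(struct net_device *netdev, u32 *flags)
{
	struct example_port *port = netdev_priv(netdev);	/* hypothetical priv */

	/* Clears ETH_RESET_PHY (and its shared variant) from *flags on
	 * success; any other requested bits are left for the core to
	 * report as unhandled.
	 */
	return mlxsw_env_reset_module(netdev, port->core, port->module,
				      flags);
}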
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ char mcion_pl[MLXSW_REG_MCION_LEN];
+ u32 status_bits;
+ int err;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ params->policy = mlxsw_env->module_info[module].power_mode_policy;
+
+ mlxsw_reg_mcion_pack(mcion_pl, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
+ goto out;
+ }
+
+ status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl);
+ if (!(status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK))
+ goto out;
+
+ if (status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK)
+ params->mode = ETHTOOL_MODULE_POWER_MODE_LOW;
+ else
+ params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
+
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
+
+static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool enable)
+{
+ enum mlxsw_reg_pmaos_admin_status admin_status;
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
+ mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
+ mlxsw_reg_pmaos_ase_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power)
+{
+ u16 eeprom_override_mask, eeprom_override;
+ char pmmp_pl[MLXSW_REG_PMMP_LEN];
+
+ mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
+ /* Mask all the bits except low power mode. */
+ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
+ mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask);
+ eeprom_override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK :
+ 0;
+ mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl);
+}
+
+static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
+ return err;
+ }
+
+ err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
+ goto err_module_low_power_set;
+ }
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
+ goto err_module_enable_set;
+ }
+
+ return 0;
+
+err_module_enable_set:
+ mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+err_module_low_power_set:
+ mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ bool low_power;
+ int err = 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ goto out;
+
+ /* If any ports are up, we are already in high power mode. */
+ if (mlxsw_env->module_info[module].num_ports_up)
+ goto out_set_policy;
+
+ low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
+ extack);
+ if (err)
+ goto out;
+
+out_set_policy:
+ mlxsw_env->module_info[module].power_mode_policy = policy;
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
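[Annotation: to make the policy semantics concrete, a hedged usage sketch using only helpers added by this patch (mlxsw_env_module_port_up()/_down() appear further down); error handling elided, module index assumed valid.]

/* Switch the module to the "auto" policy: held in low power while no
 * ports using it are administratively up.
 */
mlxsw_env_set_module_power_mode(core, module,
				ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO, NULL);
mlxsw_env_module_port_up(core, module);		/* first port up -> high power */
mlxsw_env_module_port_down(core, module);	/* last port down -> low power */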
+
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
u8 module,
bool *p_has_temp_sensor)
@@ -482,22 +685,32 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
return 0;
}
-static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
- char *mtwe_pl, void *priv)
+struct mlxsw_env_module_temp_warn_event {
+ struct mlxsw_env *mlxsw_env;
+ char mtwe_pl[MLXSW_REG_MTWE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_env_mtwe_event_work(struct work_struct *work)
{
- struct mlxsw_env *mlxsw_env = priv;
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env *mlxsw_env;
int i, sensor_warning;
bool is_overheat;
+ event = container_of(work, struct mlxsw_env_module_temp_warn_event,
+ work);
+ mlxsw_env = event->mlxsw_env;
+
for (i = 0; i < mlxsw_env->module_count; i++) {
/* 64-127 of sensor_index are mapped to the port modules
* sequentially (module 0 is mapped to sensor_index 64,
* module 1 to sensor_index 65 and so on)
*/
sensor_warning =
- mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl,
+ mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
- spin_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
is_overheat =
mlxsw_env->module_info[i].is_overheat;
@@ -507,13 +720,13 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
* warning OR current state in "no warning" and MTWE
* does not report warning.
*/
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
continue;
} else if (is_overheat && !sensor_warning) {
/* MTWE reports "no warning", turn is_overheat off.
*/
mlxsw_env->module_info[i].is_overheat = false;
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
} else {
/* Current state is "no warning" and MTWE reports
* "warning", increase the counter and turn is_overheat
@@ -521,13 +734,32 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
*/
mlxsw_env->module_info[i].is_overheat = true;
mlxsw_env->module_info[i].module_overheat_counter++;
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
}
}
+
+ kfree(event);
+}
+
+static void
+mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
+ void *priv)
+{
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env *mlxsw_env = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->mlxsw_env = mlxsw_env;
+ memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN);
+ INIT_WORK(&event->work, mlxsw_env_mtwe_event_work);
+ mlxsw_core_schedule_work(&event->work);
}
static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
- MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE);
+ MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE);
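[Annotation: the listener fires in a context where sleeping is forbidden, while the module_info lock is now a mutex; hence the payload is copied into a GFP_ATOMIC allocation and processed from a workqueue. A generic sketch of the same deferral pattern, with illustrative names (PAYLOAD_LEN and process_payload() are placeholders):]

struct deferred_event {
	struct work_struct work;
	char payload[PAYLOAD_LEN];
};

static void deferred_event_work(struct work_struct *work)
{
	struct deferred_event *ev = container_of(work, typeof(*ev), work);

	process_payload(ev->payload);	/* process context: may take a mutex */
	kfree(ev);
}

static void atomic_notifier(const char *payload, size_t len)
{
	struct deferred_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;		/* allocation failure drops the event */
	memcpy(ev->payload, payload, min(len, sizeof(ev->payload)));
	INIT_WORK(&ev->work, deferred_event_work);
	schedule_work(&ev->work);
}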
static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
{
@@ -568,9 +800,9 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- spin_lock_bh(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[event->module].is_overheat = false;
- spin_unlock_bh(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
&has_temp_sensor);
@@ -652,8 +884,10 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
for (i = 0; i < module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, i,
- MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ mlxsw_reg_pmaos_pack(pmaos_pl, i);
+ mlxsw_reg_pmaos_e_set(pmaos_pl,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
if (err)
return err;
@@ -667,29 +901,110 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- /* Prevent switch driver from accessing uninitialized data. */
- if (!mlxsw_core_is_initialized(mlxsw_core)) {
- *p_counter = 0;
- return 0;
- }
-
if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
return -EINVAL;
- spin_lock_bh(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
*p_counter = mlxsw_env->module_info[module].module_overheat_counter;
- spin_unlock_bh(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
return 0;
}
EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[module].num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_map);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[module].num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err = 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_inc;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_inc;
+
+	/* Transition to high power mode following the first port using the
+	 * module being put administratively up.
+ */
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
+ NULL);
+ if (err)
+ goto out_unlock;
+
+out_inc:
+ mlxsw_env->module_info[module].num_ports_up++;
+out_unlock:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_up);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ mlxsw_env->module_info[module].num_ports_up--;
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_unlock;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_unlock;
+
+	/* Transition to low power mode following the last port using the
+	 * module being put administratively down.
+ */
+ __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+
+out_unlock:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_down);
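[Annotation: a minimal sketch of the pairing contract these four helpers imply, read off the patch rather than documented separately: map/unmap bracket the lifetime of a port-to-module mapping, up/down bracket the port's administrative state, and the two counters drive the automatic power-mode transitions above.]

mlxsw_env_module_port_map(core, module);	/* port created */
err = mlxsw_env_module_port_up(core, module);	/* ndo_open */
if (!err)
	mlxsw_env_module_port_down(core, module);	/* ndo_stop */
mlxsw_env_module_port_unmap(core, module);	/* port removed */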
+
int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
{
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
u8 module_count;
- int err;
+ int i, err;
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
@@ -702,7 +1017,14 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (!env)
return -ENOMEM;
- spin_lock_init(&env->module_info_lock);
+ /* Firmware defaults to high power mode policy where modules are
+ * transitioned to high power mode following plug-in.
+ */
+ for (i = 0; i < module_count; i++)
+ env->module_info[i].power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+
+ mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
env->module_count = module_count;
*p_env = env;
@@ -732,6 +1054,7 @@ err_oper_state_event_enable:
err_module_plug_event_register:
mlxsw_env_temp_warn_event_unregister(env);
err_temp_warn_event_register:
+ mutex_destroy(&env->module_info_lock);
kfree(env);
return err;
}
@@ -742,5 +1065,6 @@ void mlxsw_env_fini(struct mlxsw_env *env)
/* Make sure there is no more event work scheduled. */
mlxsw_core_flush_owq();
mlxsw_env_temp_warn_event_unregister(env);
+ mutex_destroy(&env->module_info_lock);
kfree(env);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 0bf5bd0f8a7e..da121b1a84b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -24,9 +24,32 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 module,
+ u32 *flags);
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack);
+
int
mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
u64 *p_counter);
+
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+
int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
void mlxsw_env_fini(struct mlxsw_env *env);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index e92cadc98128..ab70a873a01a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -270,11 +270,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u8 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \
{ \
__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -290,13 +292,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u8 val) \
{ \
@@ -311,11 +313,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u16 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \
{ \
__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -331,13 +335,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 \
+static inline u16 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u16 val) \
{ \
@@ -352,11 +356,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
{ \
__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -372,13 +378,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 \
+static inline u32 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u32 val) \
{ \
@@ -393,11 +399,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u64 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \
{ \
__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -413,13 +421,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 \
+static inline u64 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u64 val) \
{ \
@@ -433,19 +441,19 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
{ \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
{ \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
{ \
return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
@@ -460,7 +468,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
unsigned short index, \
char *dst) \
@@ -468,7 +476,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
unsigned short index, \
const char *src) \
@@ -476,7 +484,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
{ \
return __mlxsw_item_data(buf, \
@@ -491,14 +499,14 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
{ \
return __mlxsw_item_bit_array_get(buf, \
&__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
{ \
return __mlxsw_item_bit_array_set(buf, \
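[Annotation: these accessors are stamped out wholesale by the item macros, so any given register ends up with getters or setters that nothing references; __maybe_unused tells the compiler not to warn about such unreferenced helpers under stricter warning settings. Outside the macro machinery the annotation looks like this (trivial stand-in body):]

static inline u8 __maybe_unused example_field_get(const char *buf)
{
	return buf[0];	/* would be __mlxsw_item_get8() in the real macro */
}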
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9d56c44e994..5d4dfa5ddbb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -54,8 +54,20 @@ static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
return 0;
}
-static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
+static int mlxsw_m_port_open(struct net_device *dev)
{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+}
+
+static int mlxsw_m_port_stop(struct net_device *dev)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
return 0;
}
@@ -70,8 +82,8 @@ mlxsw_m_port_get_devlink_port(struct net_device *dev)
}
static const struct net_device_ops mlxsw_m_port_netdev_ops = {
- .ndo_open = mlxsw_m_port_dummy_open_stop,
- .ndo_stop = mlxsw_m_port_dummy_open_stop,
+ .ndo_open = mlxsw_m_port_open,
+ .ndo_stop = mlxsw_m_port_stop,
.ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
@@ -124,11 +136,47 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
page, extack);
}
+static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+ flags);
+}
+
+static int
+mlxsw_m_get_module_power_mode(struct net_device *netdev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ params, extack);
+}
+
+static int
+mlxsw_m_set_module_power_mode(struct net_device *netdev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ params->policy, extack);
+}
+
static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_drvinfo = mlxsw_m_module_get_drvinfo,
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .reset = mlxsw_m_reset,
+ .get_module_power_mode = mlxsw_m_get_module_power_mode,
+ .set_module_power_mode = mlxsw_m_set_module_power_mode,
};
static int
@@ -152,20 +200,16 @@ static int
mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
{
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- struct net_device *dev = mlxsw_m_port->dev;
char ppad_pl[MLXSW_REG_PPAD_LEN];
+ u8 addr[ETH_ALEN];
int err;
mlxsw_reg_ppad_pack(ppad_pl, false, 0);
err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl);
if (err)
return err;
- mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
- /* The last byte value in base mac address is guaranteed
- * to be such it does not overflow when adding local_port
- * value.
- */
- dev->dev_addr[ETH_ALEN - 1] += mlxsw_m_port->module + 1;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1);
return 0;
}
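[Annotation: eth_hw_addr_gen() is a core helper, not part of this patch; its effect (as assumed here) is to treat the base MAC as a 48-bit integer, add the offset, and assign the result, so carry propagates across bytes and the old last-byte-overflow caveat disappears. Roughly:]

u64 u = ether_addr_to_u64(base) + id;	/* 48-bit add with carry */
u8 addr[ETH_ALEN];

u64_to_ether_addr(u, addr);
eth_hw_addr_set(dev, addr);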
@@ -266,6 +310,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
+ mlxsw_env_module_port_map(mlxsw_m->core, module);
mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
return 0;
@@ -274,6 +319,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
{
mlxsw_m->module_to_port[module] = -1;
+ mlxsw_env_module_port_unmap(mlxsw_m->core, module);
}
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6fbda6ebd590..48b817ba6d4e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4951,7 +4951,7 @@ enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
- MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
+ MLXSW_REG_PPCNT_TC_CONG_CNT = 0x13,
};
/* reg_ppcnt_grp
@@ -5371,7 +5371,7 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration,
MLXSW_ITEM64(reg, ppcnt, tx_pause_transition,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
-/* Ethernet Per Traffic Group Counters */
+/* Ethernet Per Traffic Class Counters */
/* reg_ppcnt_tc_transmit_queue
* Contains the transmit queue depth in cells of traffic class
@@ -5398,6 +5398,12 @@ MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc,
MLXSW_ITEM64(reg, ppcnt, wred_discard,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+/* reg_ppcnt_ecn_marked_tc
+ * The number of packets marked as ECN, per traffic class.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
@@ -5681,6 +5687,14 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
+/* reg_pmaos_rst
+ * Module reset toggle.
+ * Note: Setting reset while the module is plugged in will result in a
+ * transition to the "initializing" operational state.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1);
+
/* reg_pmaos_slot_index
* Slot index.
* Access: Index
@@ -5693,6 +5707,24 @@ MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4);
*/
MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8);
+enum mlxsw_reg_pmaos_admin_status {
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1,
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2,
+	/* If the module is active and then unplugged, or experiences an
+	 * error event, the operational status should go to "disabled" and
+	 * it can only be re-enabled by an explicit enable command.
+ */
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3,
+};
+
+/* reg_pmaos_admin_status
+ * Module administrative state (the desired state of the module).
+ * Note: To disable a module, all ports associated with the module must be
+ * administratively down first.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4);
+
/* reg_pmaos_ase
* Admin state update enable.
* If this bit is set, admin state will be updated based on admin_state field.
@@ -5721,13 +5753,10 @@ enum mlxsw_reg_pmaos_e {
*/
MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module,
- enum mlxsw_reg_pmaos_e e)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
{
MLXSW_REG_ZERO(pmaos, payload);
mlxsw_reg_pmaos_module_set(payload, module);
- mlxsw_reg_pmaos_e_set(payload, e);
- mlxsw_reg_pmaos_ee_set(payload, true);
}
/* PPLR - Port Physical Loopback Register
@@ -5766,6 +5795,69 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMTDB - Port Module To local DataBase Register
+ * ----------------------------------------------
+ * The PMTDB register allows querying the possible module<->local port
+ * mappings that can be used in PMLP. It does not represent the actual/current
+ * mapping of local port to module. The actual mapping is only defined by
+ * PMLP.
+ */
+#define MLXSW_REG_PMTDB_ID 0x501A
+#define MLXSW_REG_PMTDB_LEN 0x40
+
+MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN);
+
+/* reg_pmtdb_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4);
+
+/* reg_pmtdb_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8);
+
+/* reg_pmtdb_ports_width
+ * Port's width
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4);
+
+/* reg_pmtdb_num_ports
+ * Number of ports in a single module (split/breakout)
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4);
+
+enum mlxsw_reg_pmtdb_status {
+ MLXSW_REG_PMTDB_STATUS_SUCCESS,
+};
+
+/* reg_pmtdb_status
+ * Status
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
+
+/* reg_pmtdb_port_num
+ * The local_port value which can be assigned to the module.
+ * In case of more than one port, port<x> represents the /<x> port of
+ * the module.
+ * Access: RO
+ */
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false);
+
+static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
+ u8 ports_width, u8 num_ports)
+{
+ MLXSW_REG_ZERO(pmtdb, payload);
+ mlxsw_reg_pmtdb_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtdb_module_set(payload, module);
+ mlxsw_reg_pmtdb_ports_width_set(payload, ports_width);
+ mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
+}
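[Annotation: a hedged sketch of querying PMTDB for a 2-way split, mirroring how mlxsw_reg_pmtdb_port_num_get() is used in the spectrum.c hunk below; error handling abbreviated, 'core', 'module' and 'module_width' assumed from the caller.]

char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
int i, err;

mlxsw_reg_pmtdb_pack(pmtdb_pl, 0 /* main board */, module,
		     module_width / 2 /* ports_width */, 2 /* num_ports */);
err = mlxsw_reg_query(core, MLXSW_REG(pmtdb), pmtdb_pl);
if (err)
	return err;
for (i = 0; i < 2; i++) {
	u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

	/* s_local_port is the candidate local port for split port i,
	 * to be programmed via PMLP.
	 */
}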
+
/* PMPE - Port Module Plug/Unplug Event Register
* ---------------------------------------------
* This register reports any operational status change of a module.
@@ -5860,67 +5952,100 @@ static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
mlxsw_reg_pddr_page_select_set(payload, page_select);
}
-/* PMTM - Port Module Type Mapping Register
- * ----------------------------------------
- * The PMTM allows query or configuration of module types.
+/* PMMP - Port Module Memory Map Properties Register
+ * -------------------------------------------------
+ * The PMMP register allows overriding the module memory map advertisement.
+ * The register can only be set when the module is disabled via the PMAOS
+ * register.
*/
-#define MLXSW_REG_PMTM_ID 0x5067
-#define MLXSW_REG_PMTM_LEN 0x10
+#define MLXSW_REG_PMMP_ID 0x5044
+#define MLXSW_REG_PMMP_LEN 0x2C
-MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
-/* reg_pmtm_module
+/* reg_pmmp_module
* Module number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
+
+/* reg_pmmp_sticky
+ * When set, will keep eeprom_override values after plug-out event.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmmp, sticky, 0x00, 0, 1);
-enum mlxsw_reg_pmtm_module_type {
- /* Backplane with 4 lanes */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
- /* QSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_QSFP,
- /* SFP */
- MLXSW_REG_PMTM_MODULE_TYPE_SFP,
- /* Backplane with single lane */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
- /* Backplane with two lane */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
- /* Chip2Chip4x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10,
- /* Chip2Chip2x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C2X,
- /* Chip2Chip1x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C1X,
- /* QSFP-DD */
- MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
- /* OSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_OSFP,
- /* SFP-DD */
- MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD,
- /* DSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_DSFP,
- /* Chip2Chip8x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C8X,
+/* reg_pmmp_eeprom_override_mask
+ * Write mask bit (negative polarity).
+ * 0 - Allow write
+ * 1 - Ignore write
+ * On write, indicates which of the bits from the eeprom_override field are
+ * updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmmp, eeprom_override_mask, 0x04, 16, 16);
+
+enum {
+ /* Set module to low power mode */
+ MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK = BIT(8),
};
-/* reg_pmtm_module_type
- * Module type.
+/* reg_pmmp_eeprom_override
+ * Override / ignore EEPROM advertisement properties bitmask
* Access: RW
*/
-MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
-static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
{
- MLXSW_REG_ZERO(pmtm, payload);
- mlxsw_reg_pmtm_module_set(payload, module);
+ MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_module_set(payload, module);
}
-static inline void
-mlxsw_reg_pmtm_unpack(char *payload,
- enum mlxsw_reg_pmtm_module_type *module_type)
+/* PLLP - Port Local port to Label Port mapping Register
+ * -----------------------------------------------------
+ * The PLLP register returns the mapping from Local Port into Label Port.
+ */
+#define MLXSW_REG_PLLP_ID 0x504A
+#define MLXSW_REG_PLLP_LEN 0x10
+
+MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
+
+/* reg_pllp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8);
+
+/* reg_pllp_label_port
+ * Front panel label of the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8);
+
+/* reg_pllp_split_num
+ * Label split mapping for local_port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
+
+/* reg_pllp_slot_index
+ * Slot index (0: Main board).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
+
+static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pllp, payload);
+ mlxsw_reg_pllp_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
+ u8 *split_num, u8 *slot_index)
{
- *module_type = mlxsw_reg_pmtm_module_type_get(payload);
+ *label_port = mlxsw_reg_pllp_label_port_get(payload);
+ *split_num = mlxsw_reg_pllp_split_num_get(payload);
+ *slot_index = mlxsw_reg_pllp_slot_index_get(payload);
}
/* HTGT - Host Trap Group Table
@@ -6664,6 +6789,23 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
+static inline void
+mlxsw_reg_ritr_loopback_ipip6_pack(char *payload,
+ enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
+ enum mlxsw_reg_ritr_loopback_ipip_options options,
+ u16 uvr_id, u16 underlay_rif,
+ const struct in6_addr *usip, u32 gre_key)
+{
+ enum mlxsw_reg_ritr_loopback_protocol protocol =
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6;
+
+ mlxsw_reg_ritr_loopback_protocol_set(payload, protocol);
+ mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
+ uvr_id, underlay_rif, gre_key);
+ mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload,
+ (const char *)usip);
+}
+
/* RTAR - Router TCAM Allocation Register
* --------------------------------------
* This register is used for allocation of regions in the TCAM table.
@@ -6932,6 +7074,12 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip)
mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip);
}
+static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr)
+{
+ mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6);
+ mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr);
+}
+
static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
bool counter_enable)
{
@@ -8117,19 +8265,71 @@ static inline void mlxsw_reg_rtdp_pack(char *payload,
}
static inline void
-mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
- enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
- unsigned int type_check, bool gre_key_check,
- u32 ipv4_usip, u32 expected_gre_key)
+mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 expected_gre_key)
{
mlxsw_reg_rtdp_ipip_irif_set(payload, irif);
mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check);
mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check);
mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check);
- mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+static inline void
+mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv4_usip, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
+}
+
+static inline void
+mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv6_usip_ptr, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr);
+}
+
+/* RIPS - Router IP version Six Register
+ * -------------------------------------
+ * The RIPS register is used to store IPv6 addresses for use by the NVE and
+ * IPinIP
+ */
+#define MLXSW_REG_RIPS_ID 0x8021
+#define MLXSW_REG_RIPS_LEN 0x14
+
+MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN);
+
+/* reg_rips_index
+ * Index to IPv6 address.
+ * For Spectrum, the index points into the KVD linear memory.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24);
+
+/* reg_rips_ipv6
+ * IPv6 address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16);
+
+static inline void mlxsw_reg_rips_pack(char *payload, u32 index,
+ const struct in6_addr *ipv6)
+{
+ MLXSW_REG_ZERO(rips, payload);
+ mlxsw_reg_rips_index_set(payload, index);
+ mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6);
+}
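[Annotation: for illustration, writing an IPv6 underlay address at a given KVD linear index could look like the following sketch; 'core' and 'index' are assumed from the caller, and the address is a placeholder.]

char rips_pl[MLXSW_REG_RIPS_LEN];
struct in6_addr usip = IN6ADDR_LOOPBACK_INIT;

mlxsw_reg_rips_pack(rips_pl, index, &usip);
return mlxsw_reg_write(core, MLXSW_REG(rips), rips_pl);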
+
/* RATRAD - Router Adjacency Table Activity Dump Register
* ------------------------------------------------------
* The RATRAD register is used to dump and optionally clear activity bits of
@@ -10208,6 +10408,39 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MCION - Management Cable IO and Notifications Register
+ * ------------------------------------------------------
+ * The MCION register is used to query transceiver modules' IO pins and other
+ * notifications.
+ */
+#define MLXSW_REG_MCION_ID 0x9052
+#define MLXSW_REG_MCION_LEN 0x18
+
+MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
+
+/* reg_mcion_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+
+enum {
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
+};
+
+/* reg_mcion_module_status_bits
+ * Module IO status as defined by SFF.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_module_set(payload, module);
+}
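[Annotation: the decoding these status bits expect is the one mlxsw_env_get_module_power_mode() above already performs; condensed:]

char mcion_pl[MLXSW_REG_MCION_LEN];
bool low_power = false;
u32 bits;
int err;

mlxsw_reg_mcion_pack(mcion_pl, module);
err = mlxsw_reg_query(core, MLXSW_REG(mcion), mcion_pl);
if (err)
	return err;
bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl);
if (bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK)
	low_power = bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK;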
+
/* MTPPS - Management Pulse Per Second Register
* --------------------------------------------
* This register provides the device PPS capabilities, configure the PPS in and
@@ -12200,9 +12433,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pspa),
MLXSW_REG(pmaos),
MLXSW_REG(pplr),
+ MLXSW_REG(pmtdb),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
- MLXSW_REG(pmtm),
+ MLXSW_REG(pmmp),
+ MLXSW_REG(pllp),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
@@ -12210,6 +12445,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rips),
MLXSW_REG(ratrad),
MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
@@ -12249,6 +12485,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mgir),
MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
+ MLXSW_REG(mcion),
MLXSW_REG(mtpps),
MLXSW_REG(mtutc),
MLXSW_REG(mpsc),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index a56c9e19a390..a1512be77867 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -25,9 +25,6 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
- MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
- MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
- MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -84,9 +81,6 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 250c5a24264d..66c346a86ec5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -47,7 +47,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 2406
+#define MLXSW_SP1_FWREV_SUBMINOR 3326
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -64,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 2406
+#define MLXSW_SP2_FWREV_SUBMINOR 3326
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -79,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 2406
+#define MLXSW_SP3_FWREV_SUBMINOR 3326
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -316,11 +316,11 @@ static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
- ether_addr_copy(addr, mlxsw_sp->base_mac);
- addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
- return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+ eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
+ mlxsw_sp_port->local_port);
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
+ mlxsw_sp_port->dev->dev_addr);
}
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
@@ -351,12 +351,12 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
@@ -529,55 +529,80 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
port_mapping->module = module;
port_mapping->width = width;
+ port_mapping->module_width = width;
port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
+static int
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_port_mapping *port_mapping)
{
- struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- int i;
+ int i, err;
+
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
for (i = 0; i < port_mapping->width; i++) {
mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ goto err_pmlp_write;
+ return 0;
+
+err_pmlp_write:
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+ return err;
}
-static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 module)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
if (err)
return err;
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_admin_status_set;
netif_start_queue(dev);
return 0;
+
+err_port_admin_status_set:
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
+ return err;
}
static int mlxsw_sp_port_stop(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
netif_stop_queue(dev);
- return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
+ return 0;
}
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
@@ -649,7 +674,7 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -799,12 +824,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
for (i = 0; i < TC_MAX_QUEUE; i++) {
err = mlxsw_sp_port_get_stats_raw(dev,
- MLXSW_REG_PPCNT_TC_CONG_TC,
+ MLXSW_REG_PPCNT_TC_CONG_CNT,
i, ppcnt_pl);
- if (!err)
- xstats->wred_drop[i] =
- mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ if (err)
+ goto tc_cnt;
+ xstats->wred_drop[i] =
+ mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);
+
+tc_cnt:
err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
i, ppcnt_pl);
if (err)
@@ -1010,6 +1039,8 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
+ case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
+ return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
default:
return -EOPNOTSUPP;
}
@@ -1442,29 +1473,68 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
+static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *port_number,
+ u8 *split_port_subnumber,
+ u8 *slot_index)
+{
+ char pllp_pl[MLXSW_REG_PLLP_LEN];
+ int err;
+
+ mlxsw_reg_pllp_pack(pllp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pllp_unpack(pllp_pl, port_number,
+ split_port_subnumber, slot_index);
+ return 0;
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 split_base_local_port,
+ bool split,
struct mlxsw_sp_port_mapping *port_mapping)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- bool split = !!split_base_local_port;
struct mlxsw_sp_port *mlxsw_sp_port;
u32 lanes = port_mapping->width;
+ u8 split_port_subnumber;
struct net_device *dev;
+ u8 port_number;
+ u8 slot_index;
bool splittable;
int err;
+ err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+ local_port);
+ return err;
+ }
+
+ err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
+ &split_port_subnumber, &slot_index);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
+ local_port);
+ goto err_port_label_info_get;
+ }
+
splittable = lanes > 1 && !split;
err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
- port_mapping->module + 1, split,
- port_mapping->lane / lanes,
- splittable, lanes,
- mlxsw_sp->base_mac,
+ port_number, split, split_port_subnumber,
+ splittable, lanes, mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
- return err;
+ goto err_core_port_init;
}
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -1480,7 +1550,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
- mlxsw_sp_port->split_base_local_port = split_base_local_port;
mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
@@ -1498,20 +1567,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
- err = mlxsw_sp_port_module_map(mlxsw_sp_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
- mlxsw_sp_port->local_port);
- goto err_port_module_map;
- }
-
- err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sp_port->local_port);
- goto err_port_swid_set;
- }
-
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -1712,21 +1767,24 @@ err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
- mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
- mlxsw_sp_port_module_unmap(mlxsw_sp_port);
-err_port_module_map:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
free_netdev(dev);
err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+err_core_port_init:
+err_port_label_info_get:
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 module = mlxsw_sp_port->mapping.module;
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
@@ -1742,12 +1800,13 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
- mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
}
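
/* Note the teardown order above: "module" is cached up front because the
 * mapping lives inside mlxsw_sp_port, which free_netdev() releases, and
 * the SWID disable and module unmap now come last so that removal exactly
 * mirrors the new creation order (map module -> set SWID -> init core
 * port -> ...).
 */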
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1789,8 +1848,15 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp_port);
}
+static bool mlxsw_sp_local_port_valid(u8 local_port)
+{
+ return local_port != MLXSW_PORT_CPU_PORT;
+}
+
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
+ if (!mlxsw_sp_local_port_valid(local_port))
+ return false;
return mlxsw_sp->ports[local_port] != NULL;
}
@@ -1827,7 +1893,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
port_mapping = mlxsw_sp->port_mapping[i];
if (!port_mapping)
continue;
- err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
if (err)
goto err_port_create;
}
@@ -1894,17 +1960,10 @@ static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->port_mapping);
}
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
-{
- u8 offset = (local_port - 1) % max_width;
-
- return local_port - offset;
-}
-
static int
-mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_port_mapping *port_mapping,
- unsigned int count, u8 offset)
+ unsigned int count, const char *pmtdb_pl)
{
struct mlxsw_sp_port_mapping split_port_mapping;
int err, i;
@@ -1912,8 +1971,13 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
split_port_mapping = *port_mapping;
split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
- base_port, &split_port_mapping);
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (!mlxsw_sp_local_port_valid(s_local_port))
+ continue;
+
+ err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
+ true, &split_port_mapping);
if (err)
goto err_port_create;
split_port_mapping.lane += split_port_mapping.width;
@@ -1922,49 +1986,34 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
return 0;
err_port_create:
- for (i--; i >= 0; i--)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+ for (i--; i >= 0; i--) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
- u8 base_port,
- unsigned int count, u8 offset)
+ unsigned int count,
+ const char *pmtdb_pl)
{
struct mlxsw_sp_port_mapping *port_mapping;
int i;
/* Go over the original unsplit ports and recreate them. */
- for (i = 0; i < count * offset; i++) {
- port_mapping = mlxsw_sp->port_mapping[base_port + i];
- if (!port_mapping)
+ for (i = 0; i < count; i++) {
+ u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ port_mapping = mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
continue;
- mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, port_mapping);
}
}
-static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
- unsigned int count,
- unsigned int max_width)
-{
- enum mlxsw_res_id local_ports_in_x_res_id;
- int split_width = max_width / count;
-
- if (split_width == 1)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
- else if (split_width == 2)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
- else if (split_width == 4)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
- else
- return -EINVAL;
-
- if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
- return -EINVAL;
- return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
-}
-
static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
@@ -1980,9 +2029,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port_mapping port_mapping;
struct mlxsw_sp_port *mlxsw_sp_port;
- int max_width;
- u8 base_port;
- int offset;
+ enum mlxsw_reg_pmtdb_status status;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
int i;
int err;
@@ -1994,57 +2042,37 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- max_width = mlxsw_core_module_max_width(mlxsw_core,
- mlxsw_sp_port->mapping.module);
- if (max_width < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
- return max_width;
+ if (mlxsw_sp_port->split) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is already split");
+ return -EINVAL;
}
- /* Split port with non-max cannot be split. */
- if (mlxsw_sp_port->mapping.width != max_width) {
- netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
- NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
- return -EINVAL;
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
}
- offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
- if (offset < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
+ if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
return -EINVAL;
}
- /* Only in case max split is being done, the local port and
- * base port may differ.
- */
- base_port = count == max_width ?
- mlxsw_sp_cluster_base_port_get(local_port, max_width) :
- local_port;
+ port_mapping = mlxsw_sp_port->mapping;
- for (i = 0; i < count * offset; i++) {
- /* Expect base port to exist and also the one in the middle in
- * case of maximal split count.
- */
- if (i == 0 || (count == max_width && i == count / 2))
- continue;
+ for (i = 0; i < count; i++) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
- netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
- NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
- return -EINVAL;
- }
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
}
- port_mapping = mlxsw_sp_port->mapping;
-
- for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
-
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
- count, offset);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
+ count, pmtdb_pl);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -2053,7 +2081,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
err_port_split_create:
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
return err;
}
@@ -2062,11 +2090,10 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
unsigned int count;
- int max_width;
- u8 base_port;
- int offset;
int i;
+ int err;
mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
@@ -2077,35 +2104,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
}
if (!mlxsw_sp_port->split) {
- netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
NL_SET_ERR_MSG_MOD(extack, "Port was not split");
return -EINVAL;
}
- max_width = mlxsw_core_module_max_width(mlxsw_core,
- mlxsw_sp_port->mapping.module);
- if (max_width < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
- return max_width;
- }
-
- count = max_width / mlxsw_sp_port->mapping.width;
+ count = mlxsw_sp_port->mapping.module_width /
+ mlxsw_sp_port->mapping.width;
- offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
- if (WARN_ON(offset < 0)) {
- netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
- return -EINVAL;
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
}
- base_port = mlxsw_sp_port->split_base_local_port;
+ for (i = 0; i < count; i++) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3a43cba6d23c..3ab57e98cad2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -144,7 +144,8 @@ struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
- u8 width;
+ u8 width; /* Number of lanes used by the port */
+ u8 module_width; /* Number of lanes in the module (static) */
u8 lane;
};
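
/* Illustrative example of the new field: a port backed by a 4-lane module
 * that has been split in two is described by width = 2 (lanes this port
 * uses) and module_width = 4 (lanes the module has), so the unsplit count
 * can later be recovered as module_width / width = 2.
 */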
@@ -284,6 +285,7 @@ struct mlxsw_sp_port_vlan {
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
+ u64 tc_ecn[TC_MAX_QUEUE];
u64 wred_drop[TC_MAX_QUEUE];
u64 tail_drop[TC_MAX_QUEUE];
u64 backlog[TC_MAX_QUEUE];
@@ -345,7 +347,6 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
- u8 split_base_local_port;
int max_mtu;
u32 max_speed;
struct mlxsw_sp_hdroom *hdroom;
@@ -747,6 +748,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS,
MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};
@@ -758,6 +760,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS:
case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
default:
return 1;
@@ -1193,6 +1196,8 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p);
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 3a73d654017f..10ae1115de6c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9de160e740b2..d78cf5a7220a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1583,7 +1583,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
- unsigned long cb_priv;
+ unsigned long cb_priv = 0;
LIST_HEAD(bulk_list);
char *sbsr_pl;
u8 masked_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 267590a0eee7..84d4460f3dcd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -96,6 +96,9 @@ mlxsw_sp_link_ext_state_opcode_map[] = {
{1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
{1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+
+ {1042, ETHTOOL_LINK_EXT_STATE_MODULE,
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY},
};
static void
@@ -124,6 +127,10 @@ mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_m
link_ext_state_info->cable_issue =
link_ext_state_mapping.link_ext_substate;
break;
+ case ETHTOOL_LINK_EXT_STATE_MODULE:
+ link_ext_state_info->module =
+ link_ext_state_mapping.link_ext_substate;
+ break;
default:
break;
}
@@ -1197,6 +1204,41 @@ mlxsw_sp_get_rmon_stats(struct net_device *dev,
*ranges = mlxsw_rmon_ranges;
}
+static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+}
+
+static int
+mlxsw_sp_get_module_power_mode(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
+ extack);
+}
+
+static int
+mlxsw_sp_set_module_power_mode(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
+ params->policy, extack);
+}
+
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.cap_link_lanes_supported = true,
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
@@ -1218,6 +1260,9 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
.get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats,
.get_rmon_stats = mlxsw_sp_get_rmon_stats,
+ .reset = mlxsw_sp_reset,
+ .get_module_power_mode = mlxsw_sp_get_module_power_mode,
+ .set_module_power_mode = mlxsw_sp_set_module_power_mode,
};
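
/* A minimal sketch of how userspace would exercise the three new
 * callbacks, assuming a port netdev named swp1 and an ethtool build that
 * supports the module subcommands:
 *
 *   ethtool --reset swp1 phy                    # mlxsw_sp_reset()
 *   ethtool --show-module swp1                  # ..._get_module_power_mode()
 *   ethtool --set-module swp1 power-mode-policy auto  # ..._set_module_power_mode()
 */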
struct mlxsw_sp1_port_link_mode {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 5facabd86882..ad3926de88f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -24,50 +24,72 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
{
- return !!(parms.i_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms.o_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
- be32_to_cpu(parms.i_key) : 0;
+ be32_to_cpu(parms->i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_ikey(parms) ?
+ be32_to_cpu(parms->i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
- be32_to_cpu(parms.o_key) : 0;
+ be32_to_cpu(parms->o_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_okey(parms) ?
+ be32_to_cpu(parms->o_key) : 0;
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr };
}
union mlxsw_sp_l3addr
@@ -80,10 +102,10 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_saddr(parms4);
+ return mlxsw_sp_ipip_parms4_saddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_saddr(parms6);
+ return mlxsw_sp_ipip_parms6_saddr(&parms6);
}
WARN_ON(1);
@@ -95,7 +117,7 @@ static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4).addr4;
+ return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
static union mlxsw_sp_l3addr
@@ -108,10 +130,10 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4);
+ return mlxsw_sp_ipip_parms4_daddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_daddr(parms6);
+ return mlxsw_sp_ipip_parms6_daddr(&parms6);
}
WARN_ON(1);
@@ -125,6 +147,21 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
return !memcmp(&addr, &naddr, sizeof(naddr));
}
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV4,
+ .saddr = mlxsw_sp_ipip_parms4_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms4_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms4_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
+ };
+}
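+
+/* The per-family parms_init callbacks normalize the family-specific
+ * tunnel parameters (struct ip_tunnel_parm / struct __ip6_tnl_parm) into
+ * the protocol-agnostic struct mlxsw_sp_ipip_parms, so the shared
+ * mlxsw_sp_ipip_ol_netdev_change_gre() above can compare old and new
+ * saddr/daddr/link/ikey/okey without knowing which IP version it is
+ * handling.
+ */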
+
static int
mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -158,8 +195,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
u32 ikey;
parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
- has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms);
- ikey = mlxsw_sp_ipip_parms4_ikey(parms);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(&parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
@@ -218,12 +255,12 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
- lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ?
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
return (struct mlxsw_sp_rif_ipip_lb_config){
.lb_ipipt = lb_ipipt,
- .okey = mlxsw_sp_ipip_parms4_okey(parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
.ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
.saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ol_dev),
@@ -231,48 +268,39 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- struct netlink_ext_ack *extack)
+mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ const struct mlxsw_sp_ipip_parms *new_parms,
+ struct netlink_ext_ack *extack)
{
- union mlxsw_sp_l3addr old_saddr, new_saddr;
- union mlxsw_sp_l3addr old_daddr, new_daddr;
- struct ip_tunnel_parm new_parms;
+ const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms;
bool update_tunnel = false;
bool update_decap = false;
bool update_nhs = false;
int err = 0;
- new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
-
- new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms);
- old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4);
- new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms);
- old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4);
-
- if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
+ if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) {
u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
/* Since the local address has changed, if there is another
* tunnel with a matching saddr, both need to be demoted.
*/
if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
- MLXSW_SP_L3_PROTO_IPV4,
- new_saddr, ul_tb_id,
+ new_parms->proto,
+ new_parms->saddr,
+ ul_tb_id,
ipip_entry)) {
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
return 0;
}
update_tunnel = true;
- } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_okey(new_parms)) ||
- ipip_entry->parms4.link != new_parms.link) {
+ } else if (old_parms->okey != new_parms->okey ||
+ old_parms->link != new_parms->link) {
update_tunnel = true;
- } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
+ } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) {
update_nhs = true;
- } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_ikey(new_parms)) {
+ } else if (old_parms->ikey != new_parms->ikey) {
update_decap = true;
}
@@ -288,23 +316,308 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
false, false, false,
extack);
+ if (err)
+ return err;
- ipip_entry->parms4 = new_parms;
- return err;
+ ipip_entry->parms = *new_parms;
+ return 0;
+}
+
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
+ .inc_parsing_depth = false,
+ .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4,
.decap_config = mlxsw_sp_ipip_decap_config_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
+ .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4,
+ .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4,
};
-const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_parms parms = {0};
+
+ WARN_ON_ONCE(1);
+ return parms;
+}
+
+static int
+mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ return false;
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_rif_ipip_lb_config config = {0};
+
+ WARN_ON_ONCE(1);
+ return config;
+}
+
+static int
+mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp1_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp1_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops,
+};
+
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_parms6_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms6_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms6_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ enum mlxsw_reg_ratr_op op;
+
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
+ adj_index, rif_index);
+ mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl,
+ ipip_entry->dip_kvdl_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct __ip6_tnl_parm parms;
+ unsigned int type_check;
+ bool has_ikey;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms6_ikey(&parms);
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
+
+ type_check = has_ikey ?
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE;
+
+ /* Linux demuxes tunnels based on packet SIP (which must match the
+ * tunnel remote IP). Thus configure decap so that it filters out
+ * packets that are not IPv6 or have the wrong SIP. An IPIP_DECAP_ERROR
+ * trap is generated for packets that fail this criterion. Linux then
+ * handles such packets in the slow path and generates an ICMP
+ * destination unreachable.
+ */
+ mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index,
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6,
+ type_check, has_ikey,
+ ipip_entry->dip_kvdl_index, ikey);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ bool inherit_ttl = tparm.hop_limit == 0;
+ __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+
+ return (tparm.i_flags & ~okflags) == 0 &&
+ (tparm.o_flags & ~okflags) == 0 &&
+ inherit_ttl && inherit_tos &&
+ mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
+}
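+
+/* Roughly, an ip6gre tunnel is offloadable only when both TTL and
+ * traffic class are inherited and no GRE features beyond an optional
+ * key are enabled, e.g. (illustrative addresses):
+ *
+ *   ip link add g6 type ip6gre local 2001:db8::1 remote 2001:db8::2 \
+ *           ttl inherit tclass inherit key 42
+ *
+ * Enabling e.g. checksums or sequence numbers would set flags outside
+ * okflags and make can_offload return false.
+ */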
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
+
+ lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ?
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
+ return (struct mlxsw_sp_rif_ipip_lb_config){
+ .lb_ipipt = lb_ipipt,
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ .ul_protocol = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6,
+ ol_dev),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ struct __ip6_tnl_parm parms6;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ &ipip_entry->dip_kvdl_index);
+ if (err)
+ return err;
+
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index,
+ &parms6.raddr);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ return 0;
+
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+ return err;
+}
+
+static void
+mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+}
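+
+/* An IPv6 underlay destination is too wide to be carried inline in the
+ * adjacency/decap registers, so the remote address is written into a
+ * KVDL entry via the RIPS register and the RATR/RTDP packing above only
+ * carries its index (dip_kvdl_index). The rem_ip_addr_set/unset pair
+ * couples the allocation with its rollback and release.
+ */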
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp2_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp2_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = {
+ [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops,
};
static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
@@ -363,3 +676,22 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
+{
+ struct net *net = dev_net(ol_dev);
+ struct ip_tunnel *tun4;
+ struct ip6_tnl *tun6;
+
+ switch (ol_dev->type) {
+ case ARPHRD_IPGRE:
+ tun4 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun4->parms.link);
+ case ARPHRD_IP6GRE:
+ tun6 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun6->parms.link);
+ default:
+ return NULL;
+ }
+}
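+
+/* dev_get_by_index_rcu() neither sleeps nor takes a reference, so the
+ * caller is assumed to hold the RCU read lock and to not use the
+ * returned underlay device past the critical section.
+ */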
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index f0837b42d1d6..8cc259dcc8d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -7,6 +7,7 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
#include <linux/if_tunnel.h>
+#include <net/ip6_tunnel.h>
struct ip_tunnel_parm
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
@@ -21,23 +22,36 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
+ MLXSW_SP_IPIP_TYPE_GRE6,
MLXSW_SP_IPIP_TYPE_MAX,
};
+struct mlxsw_sp_ipip_parms {
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr saddr;
+ union mlxsw_sp_l3addr daddr;
+ int link;
+ u32 ikey;
+ u32 okey;
+};
+
struct mlxsw_sp_ipip_entry {
enum mlxsw_sp_ipip_type ipipt;
struct net_device *ol_dev; /* Overlay. */
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
- union {
- struct ip_tunnel_parm parms4;
- };
+ struct mlxsw_sp_ipip_parms parms;
+ u32 dip_kvdl_index;
};
struct mlxsw_sp_ipip_ops {
int dev_type;
enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
+ bool inc_parsing_depth;
+
+ struct mlxsw_sp_ipip_parms
+ (*parms_init)(const struct net_device *ol_dev);
int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -58,8 +72,13 @@ struct mlxsw_sp_ipip_ops {
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack);
+ int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+ void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry);
};
-extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[];
#endif /* _MLXSW_IPIP_H_*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 9958d503bf0e..ddb5ad88b350 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -50,12 +50,24 @@ struct mlxsw_sp_qdisc_ops {
struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent);
unsigned int num_classes;
+
+ u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+ int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+};
+
+struct mlxsw_sp_qdisc_ets_band {
+ u8 prio_bitmap;
+ int tclass_num;
+};
+
+struct mlxsw_sp_qdisc_ets_data {
+ struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
};
struct mlxsw_sp_qdisc {
u32 handle;
- int tclass_num;
- u8 prio_bitmap;
union {
struct red_stats red;
} xstats_base;
@@ -67,6 +79,10 @@ struct mlxsw_sp_qdisc {
u64 backlog;
} stats_base;
+ union {
+ struct mlxsw_sp_qdisc_ets_data *ets_data;
+ };
+
struct mlxsw_sp_qdisc_ops *ops;
struct mlxsw_sp_qdisc *parent;
struct mlxsw_sp_qdisc *qdiscs;
@@ -141,8 +157,7 @@ mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
}
static struct mlxsw_sp_qdisc *
-mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
- bool root_only)
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
@@ -150,8 +165,6 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
return NULL;
if (parent == TC_H_ROOT)
return &qdisc_state->root_qdisc;
- if (root_only)
- return NULL;
return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
mlxsw_sp_qdisc_walk_cb_find, &parent);
}
@@ -187,6 +200,32 @@ mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
}
+static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return 0xff;
+ if (!parent->ops->get_prio_bitmap)
+ return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
+ return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
+}
+
+#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+
+static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return MLXSW_SP_PORT_DEFAULT_TCLASS;
+ if (!parent->ops->get_tclass_num)
+ return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
+ return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
+}
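+
+/* With prio_bitmap/tclass_num dropped from struct mlxsw_sp_qdisc, both
+ * getters walk up the tree to the nearest ancestor that implements the
+ * callback (in practice the PRIO/ETS qdisc, which keeps the values in
+ * its ets_data bands). A leaf attached directly at the port root falls
+ * back to all priorities (0xff) and MLXSW_SP_PORT_DEFAULT_TCLASS.
+ */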
+
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -194,6 +233,7 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
int err_hdroom = 0;
int err = 0;
+ int i;
if (!mlxsw_sp_qdisc)
return 0;
@@ -211,6 +251,9 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
if (!mlxsw_sp_qdisc->ops)
return 0;
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_qdisc->qdiscs[i]);
mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
if (mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
@@ -226,6 +269,78 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
return err_hdroom ?: err;
}
+struct mlxsw_sp_qdisc_tree_validate {
+ bool forbid_ets;
+ bool forbid_tbf;
+ bool forbid_red;
+};
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate);
+
+static int
+mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
+ validate);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ switch (mlxsw_sp_qdisc->ops->type) {
+ case MLXSW_SP_QDISC_FIFO:
+ break;
+ case MLXSW_SP_QDISC_RED:
+ if (validate.forbid_red)
+ return -EINVAL;
+ validate.forbid_red = true;
+ validate.forbid_ets = true;
+ break;
+ case MLXSW_SP_QDISC_TBF:
+ if (validate.forbid_tbf)
+ return -EINVAL;
+ validate.forbid_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ case MLXSW_SP_QDISC_PRIO:
+ case MLXSW_SP_QDISC_ETS:
+ if (validate.forbid_ets)
+ return -EINVAL;
+ validate.forbid_ets = true;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
+}
+
+static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_qdisc_tree_validate validate = {};
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
+}
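+
+/* Illustrative outcomes of the validation above:
+ *
+ *   prio -> tbf -> red -> fifo   allowed: each type appears at most once
+ *                                per branch, with PRIO/ETS topmost.
+ *   red -> prio                  rejected: RED sets forbid_ets, so no
+ *                                PRIO/ETS may appear below it.
+ *   tbf -> red -> tbf            rejected: a second TBF on the branch.
+ */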
+
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -268,6 +383,10 @@ static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->num_classes = ops->num_classes;
mlxsw_sp_qdisc->ops = ops;
mlxsw_sp_qdisc->handle = handle;
+ err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
+ if (err)
+ goto err_replace;
+
err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
goto err_replace;
@@ -406,13 +525,17 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *p_tx_bytes, u64 *p_tx_packets,
u64 *p_drops, u64 *p_backlog)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
u64 tx_bytes, tx_packets;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&tx_packets, &tx_bytes);
*p_tx_packets += tx_packets;
@@ -506,19 +629,24 @@ static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *red_base;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&stats_base->tx_packets,
&stats_base->tx_bytes);
+ red_base->prob_mark = xstats->tc_ecn[tclass_num];
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
@@ -532,8 +660,10 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
- mlxsw_sp_qdisc->tclass_num);
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
}
static int
@@ -564,15 +694,33 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc);
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle);
+
+static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct tc_red_qopt_offload_params *p = params;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num;
u32 min, max;
u64 prob;
+ int err;
+
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
/* calculate probability in percentage */
prob = p->probability;
@@ -615,22 +763,27 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
void *xstats_ptr)
{
struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
- int early_drops, pdrops;
+ int early_drops, marks, pdrops;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
+ res->prob_mark += marks;
xstats_base->pdrop += pdrops;
xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
@@ -639,16 +792,19 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
u64 overlimits;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
- overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
+ overlimits = xstats->wred_drop[tclass_num] +
+ xstats->tc_ecn[tclass_num] - stats_base->overlimits;
stats_ptr->qstats->overlimits += overlimits;
stats_base->overlimits += overlimits;
@@ -660,11 +816,12 @@ static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent)
{
- return NULL;
+ /* RED and TBF are formally classful qdiscs, but all class references,
+ * including X:0, refer to one and the same class.
+ */
+ return &mlxsw_sp_qdisc->qdiscs[0];
}
-#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
-
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.type = MLXSW_SP_QDISC_RED,
.check_params = mlxsw_sp_qdisc_red_check_params,
@@ -675,14 +832,19 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle);
+
static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -704,6 +866,9 @@ static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_RED_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_RED_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -744,9 +909,12 @@ static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ tclass_num, 0,
MLXSW_REG_QEEC_MAS_DIS, 0);
}
@@ -830,9 +998,19 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
{
struct tc_tbf_qopt_offload_replace_params *p = params;
u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ int tclass_num;
u8 burst_size;
int err;
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
if (WARN_ON_ONCE(err))
/* check_params above was supposed to reject this value. */
@@ -848,7 +1026,7 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
*/
return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ tclass_num, 0,
rate_kbps, burst_size);
}
@@ -881,6 +1059,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -888,7 +1067,7 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -907,6 +1086,9 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_TBF_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_TBF_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -957,6 +1139,32 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
+static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[band])
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ qdisc_state->future_handle = handle;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+}
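+
+/* tc can emit the FIFO replace notification for a band before the
+ * parent qdisc itself is offloaded. Such "future" FIFOs are cached in
+ * qdisc_state keyed by future_handle; when the parent (a PRIO/ETS band
+ * or now also RED/TBF) is created, mlxsw_sp_qdisc_future_fifo_replace()
+ * replays the cached FIFO, and the cache is reset via the init helper
+ * above.
+ */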
+
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p)
{
@@ -965,16 +1173,15 @@ static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned int band;
u32 parent_handle;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
parent_handle = TC_H_MAJ(p->parent);
if (parent_handle != qdisc_state->future_handle) {
/* This notification is for a different Qdisc than
* the previous one. Wipe the future cache.
*/
- memset(qdisc_state->future_fifos, 0,
- sizeof(qdisc_state->future_fifos));
- qdisc_state->future_handle = parent_handle;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
+ parent_handle);
}
band = TC_H_MIN(p->parent) - 1;
@@ -1033,11 +1240,10 @@ static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_qdisc->qdiscs[i]);
- mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0;
}
+ kfree(mlxsw_sp_qdisc->ets_data);
+ mlxsw_sp_qdisc->ets_data = NULL;
return 0;
}
@@ -1066,6 +1272,31 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *mlxsw_sp_port)
+{
+ u64 backlog;
+
+ if (mlxsw_sp_qdisc->ops) {
+ backlog = mlxsw_sp_qdisc->stats_base.backlog;
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ mlxsw_sp_qdisc->stats_base.backlog = backlog;
+ }
+
+ return NULL;
+}
+
+static void
+mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
+ mlxsw_sp_port);
+}
+
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -1074,69 +1305,80 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
const unsigned int *weights,
const u8 *priomap)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
+ struct mlxsw_sp_qdisc_ets_band *ets_band;
struct mlxsw_sp_qdisc *child_qdisc;
- int tclass, i, band, backlog;
- u8 old_priomap;
+ u8 old_priomap, new_priomap;
+ int i, band;
int err;
+ if (!ets_data) {
+ ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
+ if (!ets_data)
+ return -ENOMEM;
+ mlxsw_sp_qdisc->ets_data = ets_data;
+
+ for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+
+ ets_band = &ets_data->bands[band];
+ ets_band->tclass_num = tclass_num;
+ }
+ }
+
for (band = 0; band < nbands; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ int tclass_num;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- old_priomap = child_qdisc->prio_bitmap;
- child_qdisc->prio_bitmap = 0;
+ ets_band = &ets_data->bands[band];
+
+ tclass_num = ets_band->tclass_num;
+ old_priomap = ets_band->prio_bitmap;
+ new_priomap = 0;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, !!quanta[band],
+ tclass_num, 0, !!quanta[band],
weights[band]);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (priomap[i] == band) {
- child_qdisc->prio_bitmap |= BIT(i);
+ new_priomap |= BIT(i);
if (BIT(i) & old_priomap)
continue;
err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
- i, tclass);
+ i, tclass_num);
if (err)
return err;
}
}
- child_qdisc->tclass_num = tclass;
+ ets_band->prio_bitmap = new_priomap;
- if (old_priomap != child_qdisc->prio_bitmap &&
- child_qdisc->ops && child_qdisc->ops->clean_stats) {
- backlog = child_qdisc->stats_base.backlog;
- child_qdisc->ops->clean_stats(mlxsw_sp_port,
- child_qdisc);
- child_qdisc->stats_base.backlog = backlog;
- }
+ if (old_priomap != new_priomap)
+ mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
+ child_qdisc);
- if (handle == qdisc_state->future_handle &&
- qdisc_state->future_fifos[band]) {
- err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
- child_qdisc,
- &mlxsw_sp_qdisc_ops_fifo,
- NULL);
- if (err)
- return err;
- }
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
+ band, child_qdisc);
+ if (err)
+ return err;
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ ets_band = &ets_data->bands[band];
+ ets_band->prio_bitmap = 0;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, false, 0);
+ ets_band->tclass_num, 0, false, 0);
}
- qdisc_state->future_handle = TC_H_UNSPEC;
- memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
return 0;
}
@@ -1238,6 +1480,31 @@ mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
return &mlxsw_sp_qdisc->qdiscs[band];
}
+static struct mlxsw_sp_qdisc_ets_band *
+mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ unsigned int band = child - mlxsw_sp_qdisc->qdiscs;
+
+ if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
+ band = 0;
+ return &mlxsw_sp_qdisc->ets_data->bands[band];
+}
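
The band index here is recovered purely by pointer arithmetic: child points into the qdiscs[] array embedded in the parent, so subtracting the array base yields the band. A minimal standalone sketch of the idiom, with illustrative names rather than driver code:

#include <linux/bug.h>

struct demo_elem { int dummy; };

/* Recover an element's index from its pointer, clamping out-of-range
 * values the same way the WARN_ON() above does.
 */
static unsigned int demo_elem_index(const struct demo_elem *base,
				    const struct demo_elem *elem,
				    unsigned int nmemb)
{
	unsigned int idx = elem - base;	/* defined only if elem is in base[] */

	if (WARN_ON(idx >= nmemb))
		idx = 0;
	return idx;
}
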
+
+static u8
+mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
+}
+
+static int
+mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
+}
+
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.type = MLXSW_SP_QDISC_PRIO,
.check_params = mlxsw_sp_qdisc_prio_check_params,
@@ -1248,6 +1515,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
static int
@@ -1299,6 +1568,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
@@ -1326,10 +1597,9 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
* grafted corresponds to the parent handle. If the two don't match, we
* unoffload the child.
*/
-static int
-__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- u8 band, u32 child_handle)
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
u32 parent;
@@ -1362,21 +1632,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct tc_prio_qopt_offload_graft_params *p)
-{
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->band, p->child_handle);
-}
-
static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1396,8 +1657,9 @@ static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_PRIO_GRAFT:
- return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- &p->graft_params);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1420,7 +1682,7 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1440,9 +1702,9 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_ETS_GRAFT:
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->graft_params.band,
- p->graft_params.child_handle);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1472,6 +1734,7 @@ struct mlxsw_sp_qevent_binding {
u32 handle;
int tclass_num;
enum mlxsw_sp_span_trigger span_trigger;
+ unsigned int action_mask;
};
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
@@ -1482,8 +1745,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_agent_parms *agent_parms,
int *p_span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ bool ingress;
int span_id;
int err;
@@ -1491,18 +1756,19 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
if (err)
goto err_analyzed_port_get;
trigger_parms.span_id = span_id;
trigger_parms.probability_rate = 1;
- err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
if (err)
goto err_agent_bind;
- err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
+ err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
if (err)
goto err_trigger_enable;
@@ -1511,10 +1777,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
return 0;
err_trigger_enable:
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
err_agent_bind:
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
err_analyzed_port_get:
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
return err;
@@ -1524,16 +1790,20 @@ static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_qevent_binding *qevent_binding,
int span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {
.span_id = span_id,
};
+ bool ingress;
- mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+
+ mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
@@ -1583,10 +1853,17 @@ static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
-static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
+ if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
+ NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
+ return -EOPNOTSUPP;
+ }
+
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
@@ -1614,15 +1891,17 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
}
}
-static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mall_entry *mall_entry;
int err;
list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
- qevent_binding);
+ qevent_binding, extack);
if (err)
goto err_entry_configure;
}
@@ -1646,13 +1925,17 @@ static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qe
qevent_binding);
}
-static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
+static int
+mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_qevent_binding *qevent_binding;
int err;
list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block,
+ qevent_binding,
+ extack);
if (err)
goto err_binding_configure;
}
@@ -1737,7 +2020,7 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
- err = mlxsw_sp_qevent_block_configure(qevent_block);
+ err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
if (err)
goto err_block_configure;
@@ -1825,7 +2108,8 @@ static void mlxsw_sp_qevent_block_release(void *cb_priv)
static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
- enum mlxsw_sp_span_trigger span_trigger)
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp_qevent_binding *binding;
@@ -1837,6 +2121,7 @@ mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
binding->handle = handle;
binding->tclass_num = tclass_num;
binding->span_trigger = span_trigger;
+ binding->action_mask = action_mask;
return binding;
}
@@ -1862,9 +2147,11 @@ mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
return NULL;
}
-static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_qevent_binding *qevent_binding;
@@ -1872,6 +2159,7 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
struct flow_block_cb *block_cb;
struct mlxsw_sp_qdisc *qdisc;
bool register_block = false;
+ int tclass_num;
int err;
block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
@@ -1904,14 +2192,19 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
goto err_binding_exists;
}
- qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
- qdisc->tclass_num, span_trigger);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
+ qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
+ f->sch->handle,
+ tclass_num,
+ span_trigger,
+ action_mask);
if (IS_ERR(qevent_binding)) {
err = PTR_ERR(qevent_binding);
goto err_binding_create;
}
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
+ f->extack);
if (err)
goto err_binding_configure;
@@ -1963,15 +2256,19 @@ static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp
}
}
-static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
+ return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
+ span_trigger,
+ action_mask);
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
return 0;
@@ -1983,7 +2280,22 @@ static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
- return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
+ BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
+ action_mask);
+}
+
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_ECN,
+ action_mask);
}
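
The two entry points now differ only in the SPAN trigger and in which mall actions they admit; the action_mask is what mlxsw_sp_qevent_entry_configure() tests via BIT(mall_entry->type). A schematic of that check, using hypothetical enum values rather than the driver's:

#include <linux/bits.h>
#include <linux/errno.h>

enum demo_action { DEMO_ACTION_MIRROR, DEMO_ACTION_TRAP };

/* Reject actions the event was not declared to support. */
static int demo_check_action(unsigned int action_mask, enum demo_action type)
{
	if (!(BIT(type) & action_mask))
		return -EOPNOTSUPP;
	return 0;
}

/* early_drop admits mirror and trap; ECN marking admits mirror only:
 *
 *	demo_check_action(BIT(DEMO_ACTION_MIRROR) | BIT(DEMO_ACTION_TRAP), t);
 *	demo_check_action(BIT(DEMO_ACTION_MIRROR), t);
 */
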
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1995,8 +2307,6 @@ int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
return -ENOMEM;
mutex_init(&qdisc_state->lock);
- qdisc_state->root_qdisc.prio_bitmap = 0xff;
- qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
mlxsw_sp_port->qdisc = qdisc_state;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 19bb3ca0515e..1e141b5944cd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -115,6 +115,7 @@ struct mlxsw_sp_rif_ops {
struct mlxsw_sp_router_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
+ int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};
static struct mlxsw_sp_rif *
@@ -1055,22 +1056,13 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->router->vrs);
}
-static struct net_device *
-__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
- struct net *net = dev_net(ol_dev);
-
- return dev_get_by_index_rcu(net, tun->parms.link);
-}
-
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
struct net_device *d;
u32 tb_id;
rcu_read_lock();
- d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
if (d)
tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
@@ -1116,6 +1108,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_ipip_entry *ret = NULL;
+ int err;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
@@ -1131,26 +1124,30 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
+ ipip_entry->parms = ipip_ops->parms_init(ol_dev);
- switch (ipip_ops->ul_proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- WARN_ON(1);
- break;
+ err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_rem_ip_addr_set;
}
return ipip_entry;
+err_rem_ip_addr_set:
+ mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
kfree(ipip_entry);
return ret;
}
-static void
-mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
+static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+
+ ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
kfree(ipip_entry);
}
@@ -1174,6 +1171,32 @@ mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
+static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ /* Not all tunnels require increasing the default parsing depth
+ * (96 bytes).
+ */
+ if (ipip_ops->inc_parsing_depth)
+ return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+
+ return 0;
+}
+
+static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ if (ipip_ops->inc_parsing_depth)
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+}
+
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -1187,18 +1210,32 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
+ ipip_entry->ipipt);
+ if (err)
+ goto err_parsing_depth_inc;
+
ipip_entry->decap_fib_entry = fib_entry;
fib_entry->decap.ipip_entry = ipip_entry;
fib_entry->decap.tunnel_index = tunnel_index;
+
return 0;
+
+err_parsing_depth_inc:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ fib_entry->decap.tunnel_index);
+ return err;
}
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
+ enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
+
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
+ mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1, fib_entry->decap.tunnel_index);
}
@@ -1309,6 +1346,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
saddr_len = 4;
saddr_prefix_len = 32;
break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ saddrp = &saddr.addr6;
+ saddr_len = 16;
+ saddr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -1345,7 +1387,7 @@ mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
list_del(&ipip_entry->ipip_list_node);
- mlxsw_sp_ipip_entry_dealloc(ipip_entry);
+ mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}
static bool
@@ -1450,7 +1492,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
@@ -1536,23 +1578,34 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
u16 ul_rif_id, bool enable)
{
struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
struct mlxsw_sp_rif *rif = &lb_rif->common;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ struct in6_addr *saddr6;
u32 saddr4;
+ ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
switch (lb_cf.ul_protocol) {
case MLXSW_SP_L3_PROTO_IPV4:
saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
- MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr4,
+ lb_cf.okey);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- return -EAFNOSUPPORT;
+ saddr6 = &lb_cf.saddr.addr6;
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr6,
+ lb_cf.okey);
+ break;
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
@@ -1827,7 +1880,7 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
@@ -4152,7 +4205,7 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
bool is_up;
rcu_read_lock();
- ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
rcu_read_unlock();
@@ -4376,6 +4429,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
+static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
+{
+ enum mlxsw_reg_ratr_trap_action trap_action;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &mlxsw_sp->router->adj_trap_index);
+ if (err)
+ return err;
+
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET,
+ mlxsw_sp->router->adj_trap_index,
+ mlxsw_sp->router->lb_rif_index);
+ mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+ if (err)
+ goto err_ratr_write;
+
+ return 0;
+
+err_ratr_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+ return err;
+}
+
+static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+}
+
+static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
+ return 0;
+
+ err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ refcount_set(&mlxsw_sp->router->num_groups, 1);
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
+{
+ if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
+ return;
+
+ mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
+}
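
mlxsw_sp_nexthop_group_inc()/_dec() implement a lazily created shared resource: the first nexthop group to come up allocates and programs the trap adjacency entry, and the last one to go away frees it. refcount_inc_not_zero() distinguishes "already live, just take a reference" from "first user, initialize". The same pattern in isolation; the demo_* names are hypothetical, and serialization between a concurrent first and last user is assumed to come from an outer lock, as it does here from the router lock:

#include <linux/refcount.h>

struct demo { refcount_t users; /* starts at 0 */ };

static int demo_res_init(struct demo *d);	/* hypothetical setup */
static void demo_res_fini(struct demo *d);	/* hypothetical teardown */

static int demo_res_get(struct demo *d)
{
	int err;

	if (refcount_inc_not_zero(&d->users))
		return 0;		/* already live; just took a reference */

	err = demo_res_init(d);		/* first user pays the setup cost */
	if (err)
		return err;

	refcount_set(&d->users, 1);
	return 0;
}

static void demo_res_put(struct demo *d)
{
	if (!refcount_dec_and_test(&d->users))
		return;			/* other users remain */

	demo_res_fini(d);		/* last user tears the resource down */
}
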
+
static void
mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nexthop_group *nh_grp,
@@ -4790,6 +4903,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop_obj_init;
}
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
@@ -4808,6 +4924,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop_obj_init:
for (i--; i >= 0; i--) {
@@ -4832,6 +4950,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
cancel_delayed_work(&router->nh_grp_activity_dw);
}
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5223,6 +5342,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop4_init;
}
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err)
goto err_group_refresh;
@@ -5230,6 +5352,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop4_init:
for (i--; i >= 0; i--) {
@@ -5247,6 +5371,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
int i;
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5725,41 +5850,6 @@ static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
-{
- enum mlxsw_reg_ratr_trap_action trap_action;
- char ratr_pl[MLXSW_REG_RATR_LEN];
- int err;
-
- if (mlxsw_sp->router->adj_discard_index_valid)
- return 0;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- &mlxsw_sp->router->adj_discard_index);
- if (err)
- return err;
-
- trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
- mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
- MLXSW_REG_RATR_TYPE_ETHERNET,
- mlxsw_sp->router->adj_discard_index,
- mlxsw_sp->router->lb_rif_index);
- mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
- mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
- if (err)
- goto err_ratr_write;
-
- mlxsw_sp->router->adj_discard_index_valid = true;
-
- return 0;
-
-err_ratr_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- mlxsw_sp->router->adj_discard_index);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -5772,7 +5862,6 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
u16 trap_id = 0;
u32 adjacency_index = 0;
u16 ecmp_size = 0;
- int err;
/* In case the nexthop group adjacency index is valid, use it
* with provided ECMP size. Otherwise, setup trap and pass
@@ -5783,11 +5872,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
adjacency_index = nhgi->adj_index;
ecmp_size = nhgi->ecmp_size;
} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
- err = mlxsw_sp_adj_discard_write(mlxsw_sp);
- if (err)
- return err;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- adjacency_index = mlxsw_sp->router->adj_discard_index;
+ adjacency_index = mlxsw_sp->router->adj_trap_index;
ecmp_size = 1;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
@@ -6036,8 +6122,8 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
@@ -6048,6 +6134,13 @@ mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+}
+
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
@@ -6108,7 +6201,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
fib_info_put(fib4_entry->fi);
- mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
@@ -6641,6 +6734,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
}
nh_grp->nhgi = nhgi;
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err)
goto err_group_refresh;
@@ -6648,6 +6744,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop6_init:
for (i--; i >= 0; i--) {
@@ -6665,6 +6763,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
int i;
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -6888,11 +6987,38 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
-static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry,
- const struct fib6_info *rt)
+static int
+mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
{
- if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
+ union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ int ifindex = nhgi->nexthops[0].ifindex;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV6,
+ dip);
+
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
+ return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
+ ipip_entry);
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
+{
+ if (rt->fib6_flags & RTF_LOCAL)
+ return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
+ rt);
+ if (rt->fib6_flags & RTF_ANYCAST)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
@@ -6902,6 +7028,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+
+ return 0;
}
static void
@@ -6959,12 +7087,16 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop_group_vr_link;
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ if (err)
+ goto err_fib6_entry_type_set;
fib_entry->fib_node = fib_node;
return fib6_entry;
+err_fib6_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_nexthop6_group_get:
@@ -6983,11 +7115,19 @@ err_fib_entry_priv_create:
return ERR_PTR(err);
}
+static void
+mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
+}
+
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+ mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
@@ -7340,16 +7480,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
continue;
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
}
-
- /* After flushing all the routes, it is not possible anyone is still
- * using the adjacency index that is discarding packets, so free it in
- * case it was allocated.
- */
- if (!mlxsw_sp->router->adj_discard_index_valid)
- return;
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- mlxsw_sp->router->adj_discard_index);
- mlxsw_sp->router->adj_discard_index_valid = false;
}
struct mlxsw_sp_fib6_event {
@@ -9447,7 +9577,6 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
@@ -9460,6 +9589,18 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
+static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
+static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
@@ -9874,6 +10015,7 @@ static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
.init = mlxsw_sp1_router_init,
+ .ipips_init = mlxsw_sp1_ipips_init,
};
static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -9889,6 +10031,7 @@ static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
.init = mlxsw_sp2_router_init,
+ .ipips_init = mlxsw_sp2_ipips_init,
};
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
@@ -9934,7 +10077,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rifs_init;
- err = mlxsw_sp_ipips_init(mlxsw_sp);
+ err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
if (err)
goto err_ipips_init;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 25d3eae63501..1d0d28f8ff05 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -65,8 +65,6 @@ struct mlxsw_sp_router {
struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
- u32 adj_discard_index;
- bool adj_discard_index_valid;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
struct work_struct fib_event_work;
@@ -82,6 +80,8 @@ struct mlxsw_sp_router {
struct delayed_work nh_grp_activity_dw;
struct list_head nh_res_grp_list;
bool inc_parsing_depth;
+ refcount_t num_groups;
+ u32 adj_trap_index;
};
struct mlxsw_sp_fib_entry_priv {
@@ -226,6 +226,8 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3398cc01e5ec..f5f819aa9a65 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1650,6 +1650,22 @@ void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
+{
+ switch (trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ return true;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ return false;
+ }
+
+ WARN_ON_ONCE(1);
+ return false;
+}
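
The helper centralizes a fact the qevent code previously hard-coded as true: whether a trigger observes packets on their ingress or egress port, which decides the direction in which the analyzed-port reference is taken. As used in the hunks above:

	bool ingress = mlxsw_sp_span_trigger_is_ingress(trigger);

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
	...
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);

so the configure and deconfigure paths always agree on the direction.
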
+
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index efaefd1ae863..82e711afb02b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -120,6 +120,7 @@ int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger);
extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops;
extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops;
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index b27713906d3a..c11b118dc415 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -348,13 +348,15 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}
-static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+static void ks8842_init_mac_addr(struct ks8842_adapter *adapter)
{
+ u8 addr[ETH_ALEN];
int i;
u16 mac;
for (i = 0; i < ETH_ALEN; i++)
- dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ addr[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ eth_hw_addr_set(adapter->netdev, addr);
if (adapter->conf_flags & MICREL_KS884X) {
/*
@@ -380,7 +382,7 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
}
}
-static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, const u8 *mac)
{
unsigned long flags;
unsigned i;
@@ -1064,7 +1066,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
ks8842_write_mac_addr(adapter, mac);
return 0;
@@ -1191,12 +1193,11 @@ static int ks8842_probe(struct platform_device *pdev)
if (i < netdev->addr_len)
/* an address was passed, use it */
- memcpy(netdev->dev_addr, pdata->macaddr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->macaddr);
}
if (i == netdev->addr_len) {
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ ks8842_init_mac_addr(adapter);
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e2eb0caeac82..6f34a61739b6 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -427,7 +427,7 @@ struct ks8851_net {
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en);
-int ks8851_remove_common(struct device *dev);
+void ks8851_remove_common(struct device *dev);
int ks8851_suspend(struct device *dev);
int ks8851_resume(struct device *dev);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index a6db1a8156e1..691206f19ea7 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -165,6 +165,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
unsigned long flags;
+ u8 addr[ETH_ALEN];
u16 reg;
int i;
@@ -172,9 +173,10 @@ static void ks8851_read_mac_addr(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i += 2) {
reg = ks8851_rdreg16(ks, KS_MAR(i));
- dev->dev_addr[i] = reg >> 8;
- dev->dev_addr[i + 1] = reg & 0xff;
+ addr[i] = reg >> 8;
+ addr[i + 1] = reg & 0xff;
}
+ eth_hw_addr_set(dev, addr);
ks8851_unlock(ks, &flags);
}
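
This is the conversion pattern the series applies across drivers: with netdev->dev_addr now const, a driver assembles the address in a local buffer and installs it via eth_hw_addr_set() instead of writing dev_addr bytes in place. A minimal sketch, assuming a hypothetical register-read helper:

#include <linux/etherdevice.h>

static void demo_read_mac(struct net_device *dev)
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = demo_read8(dev, i);	/* hypothetical accessor */

	eth_hw_addr_set(dev, addr);	/* copies addr into dev->dev_addr */
}
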
@@ -195,7 +197,7 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
struct net_device *dev = ks->netdev;
int ret;
- ret = of_get_mac_address(np, dev->dev_addr);
+ ret = of_get_ethdev_address(np, dev);
if (!ret) {
ks8851_write_mac_addr(dev);
return;
@@ -672,7 +674,7 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return ks8851_write_mac_addr(dev);
}
@@ -1247,7 +1249,7 @@ err_reg_io:
}
EXPORT_SYMBOL_GPL(ks8851_probe_common);
-int ks8851_remove_common(struct device *dev)
+void ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
@@ -1261,8 +1263,6 @@ int ks8851_remove_common(struct device *dev)
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
-
- return 0;
}
EXPORT_SYMBOL_GPL(ks8851_remove_common);
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e8fcce50f9d..2e25798c610e 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -327,7 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev)
static int ks8851_remove_par(struct platform_device *pdev)
{
- return ks8851_remove_common(&pdev->dev);
+ ks8851_remove_common(&pdev->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 479406ecbaa3..0303e727e99f 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -454,7 +454,9 @@ static int ks8851_probe_spi(struct spi_device *spi)
static int ks8851_remove_spi(struct spi_device *spi)
{
- return ks8851_remove_common(&spi->dev);
+ ks8851_remove_common(&spi->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index a0ee155f9f51..99c0c1491af2 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4033,7 +4033,7 @@ static void hw_set_add_addr(struct ksz_hw *hw)
}
}
-static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
int j = ADDITIONAL_ENTRIES;
@@ -4054,7 +4054,7 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
return -1;
}
-static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
@@ -5581,7 +5581,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
- memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, mac->sa_data);
interrupt = hw_block_intr(hw);
@@ -7005,12 +7005,14 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mem_end = dev->mem_start + reg_len - 1;
dev->irq = pdev->irq;
if (MAIN_PORT == i)
- memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- ETH_ALEN);
+ eth_hw_addr_set(dev, hw_priv->hw.override_addr);
else {
- memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
+ u8 addr[ETH_ALEN];
+
+ ether_addr_copy(addr, sw->other_addr);
if (ether_addr_equal(sw->other_addr, hw->override_addr))
- dev->dev_addr[5] += port->first_port;
+ addr[5] += port->first_port;
+ eth_hw_addr_set(dev, addr);
}
dev->netdev_ops = &netdev_ops;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 09cdc2f2e7ff..634ac7649c43 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -517,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(dev->dev_addr, address->sa_data);
+ eth_hw_addr_set(dev, address->sa_data);
return enc28j60_set_hw_macaddr(dev);
}
@@ -1539,7 +1539,6 @@ static const struct net_device_ops enc28j60_netdev_ops = {
static int enc28j60_probe(struct spi_device *spi)
{
- unsigned char macaddr[ETH_ALEN];
struct net_device *dev;
struct enc28j60_net *priv;
int ret = 0;
@@ -1572,9 +1571,7 @@ static int enc28j60_probe(struct spi_device *spi)
goto error_irq;
}
- if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
- ether_addr_copy(dev->dev_addr, macaddr);
- else
+ if (device_get_ethdev_address(&spi->dev, dev))
eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 0bc6b3176fbf..b90efc80fb59 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -761,7 +761,7 @@ static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, address->sa_data);
return encx24j600_set_hw_macaddr(dev);
}
@@ -1001,6 +1001,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
struct net_device *ndev;
struct encx24j600_priv *priv;
u16 eidled;
+ u8 addr[ETH_ALEN];
ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
@@ -1056,7 +1057,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Get the MAC address from the chip */
- encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+ encx24j600_hw_get_macaddr(priv, addr);
+ eth_hw_addr_set(ndev, addr);
ndev->ethtool_ops = &encx24j600_ethtool_ops;
@@ -1125,4 +1127,3 @@ module_spi_driver(encx24j600_spi_net_driver);
MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 9e8561cdc32a..03d02403c19e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -816,7 +816,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
eth_random_addr(adapter->mac_address);
}
lan743x_mac_set_address(adapter, adapter->mac_address);
- ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
return 0;
}
@@ -2645,7 +2645,7 @@ static int lan743x_netdev_set_mac_address(struct net_device *netdev,
ret = eth_prepare_mac_addr_change(netdev, sock_addr);
if (ret)
return ret;
- ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
lan743x_mac_set_address(adapter, sock_addr->sa_data);
lan743x_rfe_update_mac_address(adapter);
return 0;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 6080028c1df2..34c22eea0124 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -279,6 +279,7 @@
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
(((value) & 0x7) << (1 + ((channel) << 2)))
#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index ab6d719d40f0..9380e396f648 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -491,9 +491,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
int perout_pin = 0;
unsigned int index = perout_request->index;
struct lan743x_ptp_perout *perout = &ptp->perout[index];
+ int ret = 0;
/* Reject requests with unsupported flags */
- if (perout_request->flags)
+ if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
return -EOPNOTSUPP;
if (on) {
@@ -518,6 +519,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve event channel %d for PEROUT\n",
index);
+ ret = -EBUSY;
goto failed;
}
@@ -529,6 +531,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve gpio %d for PEROUT\n",
perout_pin);
+ ret = -EBUSY;
goto failed;
}
@@ -540,27 +543,93 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
period_sec += perout_request->period.nsec / 1000000000;
period_nsec = perout_request->period.nsec % 1000000000;
- if (period_sec == 0) {
- if (period_nsec >= 400000000) {
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on, ts_period;
+ s64 wf_high, period64, half;
+ s32 remainder;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ wf_high = timespec64_to_ns(&ts_on);
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+ period64 = timespec64_to_ns(&ts_period);
+
+ if (period64 < 200) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ if (wf_high >= period64) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "pulse width must be smaller than period\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* Check if we can do a 50% toggle on an even period value.
+ * If the period is odd, then check if the requested
+ * pulse width matches one of the pre-defined width values.
+ * Otherwise, return failure.
+ */
+ half = div_s64_rem(period64, 2, &remainder);
+ if (!remainder) {
+ if (half == wf_high) {
+ /* It's 50% match. Use the toggle option */
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
+ /* In this case, divide the period value by 2 */
+ ts_period = ns_to_timespec64(div_s64(period64, 2));
+ period_sec = ts_period.tv_sec;
+ period_nsec = ts_period.tv_nsec;
+
+ goto program;
+ }
+ }
+ /* If we can't do toggle, the width option needs to be an exact match */
+ if (wf_high == 200000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
- } else if (period_nsec >= 20000000) {
+ } else if (wf_high == 10000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
- } else if (period_nsec >= 2000000) {
+ } else if (wf_high == 1000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
- } else if (period_nsec >= 200000) {
+ } else if (wf_high == 100000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
- } else if (period_nsec >= 20000) {
+ } else if (wf_high == 10000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
- } else if (period_nsec >= 200) {
+ } else if (wf_high == 100) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
} else {
netif_warn(adapter, drv, adapter->netdev,
- "perout period too small, minimum is 200nS\n");
+ "duty cycle specified is not supported\n");
+ ret = -EOPNOTSUPP;
goto failed;
}
} else {
- pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
}
+program:
/* turn off by setting target far in the future */
lan743x_csr_write(adapter,
@@ -599,7 +668,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
failed:
lan743x_ptp_perout_off(adapter, index);
- return -ENODEV;
+ return ret;
}
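
A worked pass through the duty-cycle branch above: the 50% case is detected by halving the period and comparing against the on-time; anything else must match one of the six fixed widths exactly.

	period = 200 ms, on = 100 ms:
		period64 = 200000000 ns, wf_high = 100000000 ns;
		period64 / 2 == wf_high, so TOGGLE mode is used and the
		programmed period becomes 100 ms.

	period = 300 ms, on = 10 ms:
		150000000 != 10000000, so no toggle; wf_high matches the
		fixed 10 ms width (PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_).

	period = 300 ms, on = 70 ms:
		no toggle and no fixed width matches, so -EOPNOTSUPP.
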
static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 5030dfca3879..4625d4fb4cde 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -234,8 +234,7 @@ static int sparx5_create_targets(struct sparx5 *sparx5)
}
iomem[idx] = devm_ioremap(sparx5->dev,
iores[idx]->start,
- iores[idx]->end - iores[idx]->start
- + 1);
+ resource_size(iores[idx]));
if (!iomem[idx]) {
dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
iores[idx]->name);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index cb68eaaac881..e042f117dc7a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -162,7 +162,7 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p)
sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
/* Record the address */
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -200,7 +200,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
{
struct sparx5_port *spx5_port;
struct net_device *ndev;
- u64 val;
ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
if (!ndev)
@@ -216,8 +215,7 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
ndev->netdev_ops = &sparx5_port_netdev_ops;
ndev->ethtool_ops = &sparx5_ethtool_ops;
- val = ether_addr_to_u64(sparx5->base_mac) + portno + 1;
- u64_to_ether_addr(val, ndev->dev_addr);
+ eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1);
return ndev;
}
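
eth_hw_addr_gen() is effectively the helper form of the arithmetic it replaces here; deriving a per-port MAC as base plus offset amounts to:

	u8 addr[ETH_ALEN];
	u64 val;

	val = ether_addr_to_u64(base_mac) + offset;	/* here: portno + 1 */
	u64_to_ether_addr(val, addr);
	eth_hw_addr_set(ndev, addr);

with the helper also taking care of writing the result into the now-const dev_addr.
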
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index d5c485a6d284..7c7a5fb91f79 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -363,7 +363,7 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
}
hwc_cq->gdma_cq = cq;
- comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
+ comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
if (!comp_buf) {
err = -ENOMEM;
goto out;
@@ -580,7 +580,7 @@ static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
return err;
}
- ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL);
+ ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 030ae89f3a33..d65697c239c8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1610,7 +1610,7 @@ static int mana_init_port(struct net_device *ndev)
if (apc->num_queues > apc->max_queues)
apc->num_queues = apc->max_queues;
- ether_addr_copy(ndev->dev_addr, apc->mac_addr);
+ eth_hw_addr_set(ndev, apc->mac_addr);
return 0;
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 49def6934cad..15179b9529e1 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -65,7 +65,7 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
return 0;
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index b6a73d151dec..8dd8c7f425d2 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -28,7 +28,7 @@ config MSCC_OCELOT_SWITCH
depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on HAS_IOMEM
- depends on OF_NET
+ depends on OF
select MSCC_OCELOT_SWITCH_LIB
select GENERIC_PHY
help
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a08e4f530c1c..4e5ae687d2e2 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -162,48 +162,117 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
- struct ocelot_vlan native_vlan)
+static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u32 val = 0;
+ struct ocelot_bridge_vlan *vlan;
+ int num_untagged = 0;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
- ocelot_port->native_vlan = native_vlan;
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ }
- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid),
- REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port);
+ return num_untagged;
+}
+
+static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+ int num_tagged = 0;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
+
+ if (!(vlan->untagged & BIT(port)))
+ num_tagged++;
+ }
+
+ return num_tagged;
+}
+
+/* We use native VLAN when we have to mix egress-tagged VLANs with exactly
+ * _one_ egress-untagged VLAN (_the_ native VLAN)
+ */
+static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
+{
+ return ocelot_port_num_tagged_vlans(ocelot, port) &&
+ ocelot_port_num_untagged_vlans(ocelot, port) == 1;
+}
+
+static struct ocelot_bridge_vlan *
+ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
+ return vlan;
+
+ return NULL;
+}
+
+/* Keep REW_TAG_CFG_TAG_CFG and, if applicable, REW_PORT_VLAN_CFG_PORT_VID
+ * in sync with the bridge VLAN table and the VLAN awareness state of the
+ * port.
+ */
+static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ enum ocelot_port_tag_config tag_cfg;
+ bool uses_native_vlan = false;
if (ocelot_port->vlan_aware) {
- if (native_vlan.valid)
- /* Tag all frames except when VID == DEFAULT_VLAN */
- val = REW_TAG_CFG_TAG_CFG(1);
+ uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);
+
+ if (uses_native_vlan)
+ tag_cfg = OCELOT_PORT_TAG_NATIVE;
+ else if (ocelot_port_num_untagged_vlans(ocelot, port))
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
else
- /* Tag all frames */
- val = REW_TAG_CFG_TAG_CFG(3);
+ tag_cfg = OCELOT_PORT_TAG_TRUNK;
} else {
- /* Port tagging disabled. */
- val = REW_TAG_CFG_TAG_CFG(0);
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
}
- ocelot_rmw_gix(ocelot, val,
+
+ ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
+
+ if (uses_native_vlan) {
+ struct ocelot_bridge_vlan *native_vlan;
+
+ /* Not having a native VLAN is impossible, because
+ * ocelot_port_num_untagged_vlans has returned 1.
+ * So there is no use in checking for NULL here.
+ */
+ native_vlan = ocelot_port_find_native_vlan(ocelot, port);
+
+ ocelot_rmw_gix(ocelot,
+ REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port);
+ }
}
/* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
- struct ocelot_vlan pvid_vlan)
+ const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
- if (!ocelot_port->vlan_aware)
- pvid_vlan.vid = 0;
+ if (ocelot_port->vlan_aware && pvid_vlan)
+ pvid = pvid_vlan->vid;
ocelot_rmw_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid),
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
ANA_PORT_VLAN_CFG_VLAN_VID_M,
ANA_PORT_VLAN_CFG, port);
@@ -212,7 +281,7 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
*/
- if (!pvid_vlan.valid && ocelot_port->vlan_aware)
+ if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
@@ -222,31 +291,90 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
ANA_PORT_DROP_CFG, port);
}
-static int ocelot_vlan_member_set(struct ocelot *ocelot, u32 vlan_mask, u16 vid)
+static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
+ u16 vid)
{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
+ bool untagged)
+{
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
int err;
- err = ocelot_vlant_set_mask(ocelot, vid, vlan_mask);
- if (err)
+ if (vlan) {
+ portmask = vlan->portmask | BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ /* Bridge VLANs can be overwritten with a different
+ * egress-tagging setting, so make sure to override an untagged
+ * with a tagged VID if that's going on.
+ */
+ if (untagged)
+ vlan->untagged |= BIT(port);
+ else
+ vlan->untagged &= ~BIT(port);
+
+ return 0;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ portmask = BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err) {
+ kfree(vlan);
return err;
+ }
- ocelot->vlan_mask[vid] = vlan_mask;
+ vlan->vid = vid;
+ vlan->portmask = portmask;
+ if (untagged)
+ vlan->untagged = BIT(port);
+ INIT_LIST_HEAD(&vlan->list);
+ list_add_tail(&vlan->list, &ocelot->vlans);
return 0;
}
-static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid)
-{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] | BIT(port),
- vid);
-}
-
static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] & ~BIT(port),
- vid);
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
+ int err;
+
+ if (!vlan)
+ return 0;
+
+ portmask = vlan->portmask & ~BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ if (vlan->portmask)
+ return 0;
+
+ list_del(&vlan->list);
+ kfree(vlan);
+
+ return 0;
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
@@ -279,7 +407,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
- ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan);
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -288,14 +416,20 @@ EXPORT_SYMBOL(ocelot_port_vlan_filtering);
int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged, struct netlink_ext_ack *extack)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- /* Deny changing the native VLAN, but always permit deleting it */
- if (untagged && ocelot_port->native_vlan.vid != vid &&
- ocelot_port->native_vlan.valid) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port already has a native VLAN");
- return -EBUSY;
+ if (untagged) {
+ /* We are adding an egress-untagged VLAN */
+ if (ocelot_port_uses_native_vlan(ocelot, port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
+ return -EBUSY;
+ }
+ } else {
+ /* We are adding an egress-tagged VLAN */
+ if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
+ return -EBUSY;
+ }
}
return 0;
@@ -307,27 +441,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
{
int err;
- err = ocelot_vlan_member_add(ocelot, port, vid);
+ err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
/* Default ingress vlan classification */
- if (pvid) {
- struct ocelot_vlan pvid_vlan;
-
- pvid_vlan.vid = vid;
- pvid_vlan.valid = true;
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (pvid)
+ ocelot_port_set_pvid(ocelot, port,
+ ocelot_bridge_vlan_find(ocelot, vid));
/* Untagged egress vlan classification */
- if (untagged) {
- struct ocelot_vlan native_vlan;
-
- native_vlan.vid = vid;
- native_vlan.valid = true;
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -343,18 +467,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan.vid == vid) {
- struct ocelot_vlan pvid_vlan = {0};
-
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
- if (ocelot_port->native_vlan.vid == vid) {
- struct ocelot_vlan native_vlan = {0};
-
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -372,13 +489,13 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
/* Configure the port VLAN memberships */
for (vid = 1; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_set(ocelot, 0, vid);
+ ocelot_vlant_set_mask(ocelot, vid, 0);
/* Because VLAN filtering is enabled, we need VID 0 to get untagged
* traffic. It is added automatically if the 8021q module is loaded, but
* we can't rely on that since the module may not be loaded.
*/
- ocelot_vlan_member_set(ocelot, all_ports, 0);
+ ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
@@ -951,7 +1068,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
ocelot_ifh_set_bypass(ifh, 1);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
ocelot_ifh_set_rew_op(ifh, rew_op);
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
@@ -1680,12 +1797,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_vlan pvid = {0}, native_vlan = {0};
ocelot_port->bridge = NULL;
- ocelot_port_set_pvid(ocelot, port, pvid);
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+ ocelot_port_set_pvid(ocelot, port, NULL);
+ ocelot_port_manage_port_tag(ocelot, port);
ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -2071,9 +2187,10 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
OCELOT_TAG_PREFIX_NONE);
/* Configure the CPU port to be VLAN aware */
- ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ocelot_write_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
}
@@ -2130,6 +2247,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ INIT_LIST_HEAD(&ocelot->vlans);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
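
The structural change in ocelot.c is the move from a flat vlan_mask[VLAN_N_VID] array to a linked list of bridge VLAN objects, each carrying a port membership mask plus an egress-untagged mask, which is what makes per-port tagging decisions like ocelot_port_uses_native_vlan() possible. A minimal sketch of the lookup-or-allocate flow in ocelot_vlan_member_add(), using a reduced, hypothetical struct and eliding the hardware write:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Reduced, hypothetical model of struct ocelot_bridge_vlan */
struct example_bridge_vlan {
	u16 vid;
	unsigned long portmask;	/* ports that are members of this VID */
	unsigned long untagged;	/* members that egress-untag this VID */
	struct list_head list;
};

static struct example_bridge_vlan *
example_vlan_find(struct list_head *vlans, u16 vid)
{
	struct example_bridge_vlan *vlan;

	list_for_each_entry(vlan, vlans, list)
		if (vlan->vid == vid)
			return vlan;

	return NULL;
}

static int example_vlan_member_add(struct list_head *vlans, int port,
				   u16 vid, bool untagged)
{
	struct example_bridge_vlan *vlan = example_vlan_find(vlans, vid);

	if (!vlan) {
		vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
		if (!vlan)
			return -ENOMEM;
		vlan->vid = vid;
		list_add_tail(&vlan->list, vlans);
	}

	vlan->portmask |= BIT(port);
	/* a re-add may flip the egress-tagging setting for this port */
	if (untagged)
		vlan->untagged |= BIT(port);
	else
		vlan->untagged &= ~BIT(port);

	return 0;
}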
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 1952d6a1b98a..e43da09b8f91 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -25,6 +25,7 @@
#include "ocelot_rew.h"
#include "ocelot_qs.h"
+#define OCELOT_VLAN_UNAWARE_PVID 0
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 8b843d3c9189..769a8159373e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -142,17 +142,77 @@ ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
return NULL;
}
+static int
+ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port,
+ struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ocelot_port->vlan_aware) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only modify VLAN under VLAN aware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = a->vlan.vid;
+ filter->action.pcp = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
+static int
+ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ enum ocelot_tag_tpid_sel tpid;
+
+ switch (ntohs(a->vlan.proto)) {
+ case ETH_P_8021Q:
+ tpid = OCELOT_TAG_TPID_SEL_8021Q;
+ break;
+ case ETH_P_8021AD:
+ tpid = OCELOT_TAG_TPID_SEL_8021AD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify custom TPID");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.tag_a_tpid_sel = tpid;
+ filter->action.push_outer_tag = OCELOT_ES0_TAG;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID;
+ filter->action.vid_a_val = a->vlan.vid;
+ filter->action.pcp_a_val = a->vlan.prio;
+ filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
u64 rate;
+ int err;
if (!flow_action_basic_hw_stats_check(&f->rule->action,
f->common.extack))
@@ -273,26 +333,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_VLAN_MANGLE:
- if (filter->block_id != VCAP_IS1) {
- NL_SET_ERR_MSG_MOD(extack,
- "VLAN modify action can only be offloaded to VCAP IS1");
- return -EOPNOTSUPP;
- }
- if (filter->goto_target != -1) {
+ if (filter->block_id == VCAP_IS1) {
+ err = ocelot_flower_parse_ingress_vlan_modify(ocelot, port,
+ filter, a,
+ extack);
+ } else if (filter->block_id == VCAP_ES0) {
+ err = ocelot_flower_parse_egress_vlan_modify(filter, a,
+ extack);
+ } else {
NL_SET_ERR_MSG_MOD(extack,
- "Last action must be GOTO");
- return -EOPNOTSUPP;
+ "VLAN modify action can only be offloaded to VCAP IS1 or ES0");
+ err = -EOPNOTSUPP;
}
- if (!ocelot_port->vlan_aware) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can only modify VLAN under VLAN aware bridge");
- return -EOPNOTSUPP;
- }
- filter->action.vid_replace_ena = true;
- filter->action.pcp_dei_ena = true;
- filter->action.vid = a->vlan.vid;
- filter->action.pcp = a->vlan.prio;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ if (err)
+ return err;
break;
case FLOW_ACTION_PRIORITY:
if (filter->block_id != VCAP_IS1) {
@@ -340,7 +394,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
}
filter->action.tag_a_tpid_sel = tpid;
filter->action.push_outer_tag = OCELOT_ES0_TAG;
- filter->action.tag_a_vid_sel = 1;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID;
filter->action.vid_a_val = a->vlan.vid;
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -678,6 +732,31 @@ static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
return 0;
}
+/* If we have an egress VLAN modification rule, we need to actually write the
+ * delta between the input VLAN (from the key) and the output VLAN (from the
+ * action), but the action was parsed first. So we need to patch the delta into
+ * the action here.
+ */
+static int
+ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ if (filter->block_id != VCAP_ES0 ||
+ filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID)
+ return 0;
+
+ if (filter->vlan.vid.mask != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 VLAN rewriting needs a full VLAN in the key");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_a_val -= filter->vlan.vid.value;
+ filter->action.vid_a_val &= VLAN_VID_MASK;
+
+ return 0;
+}
+
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
@@ -701,6 +780,12 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
return ret;
}
+ ret = ocelot_flower_patch_es0_vlan_modify(filter, extack);
+ if (ret) {
+ kfree(filter);
+ return ret;
+ }
+
/* The non-optional GOTOs for the TCAM skeleton don't need
* to be actually offloaded.
*/
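
The delta written by ocelot_flower_patch_es0_vlan_modify() is worth a worked example: with OCELOT_ES0_VID_PLUS_CLASSIFIED_VID the rewriter adds the classified VID to the programmed value, so a rule that matches VID 100 and rewrites to VID 200 must program 200 - 100 = 100. A sketch of the arithmetic, modulo the 12-bit VID space:

#include <linux/if_vlan.h>
#include <linux/types.h>

/* Worked example: key_vid = 100, action_vid = 200 yields a delta of
 * 100; hardware then emits 100 (classified) + 100 (delta) = 200.
 */
static u16 example_es0_vid_delta(u16 key_vid, u16 action_vid)
{
	return (action_vid - key_vid) & VLAN_VID_MASK;
}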
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 4b0941f09f71..1fa58546abdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -116,16 +116,16 @@ static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
- ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid);
- ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2545727fd5b2..e3fc4548f642 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -418,7 +418,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
* with VLAN filtering feature. We need to keep it to receive
* untagged traffic.
*/
- if (vid == 0)
+ if (vid == OCELOT_VLAN_UNAWARE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
@@ -553,7 +553,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
- w.forget.vid = ocelot_port->pvid_vlan.vid;
+ w.forget.vid = OCELOT_VLAN_UNAWARE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
@@ -567,7 +567,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
- w.learn.vid = ocelot_port->pvid_vlan.vid;
+ w.learn.vid = OCELOT_VLAN_UNAWARE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
@@ -602,11 +602,11 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1705,10 +1705,9 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
- dev->dev_addr[ETH_ALEN - 1] += port;
+ eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index d51f799e4e86..38103b0255b0 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -1135,10 +1135,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
- err = devlink_register(devlink);
- if (err)
- goto out_ocelot_deinit;
-
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
goto out_ocelot_devlink_unregister;
@@ -1161,6 +1157,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
of_node_put(ports);
+ devlink_register(devlink);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -1170,8 +1167,6 @@ out_ocelot_release_ports:
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
out_ocelot_devlink_unregister:
- devlink_unregister(devlink);
-out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
@@ -1184,11 +1179,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
- devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
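
The mscc_ocelot_probe()/mscc_ocelot_remove() hunks follow the tree-wide devlink rework that recurs in the nfp and ionic diffs below: devlink_register() no longer returns an error and moves to the very end of probe, so user space cannot reach a half-initialized instance, and devlink_unregister() correspondingly moves to the top of remove. A minimal sketch of the ordering, with hypothetical setup/teardown stand-ins:

#include <net/devlink.h>

static int example_setup_hw(void) { return 0; }	/* hypothetical */
static void example_teardown_hw(void) { }	/* hypothetical */

static int example_probe(struct devlink *dl)
{
	int err;

	err = example_setup_hw();
	if (err)
		return err;

	devlink_register(dl);	/* last step; returns void */
	return 0;
}

static void example_remove(struct devlink *dl)
{
	devlink_unregister(dl);	/* cut off user space first */
	example_teardown_hw();
}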
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c1a75b08ced7..5736fcdafd7a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -796,7 +796,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
return status;
}
-static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp,
+ const u8 * addr)
{
struct myri10ge_cmd cmd;
int status;
@@ -3022,7 +3023,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
}
/* change the dev structure */
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return 0;
}
@@ -3738,7 +3739,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
- int i;
int status = -ENXIO;
int dac_enabled;
unsigned hdr_offset, ss_offset;
@@ -3828,8 +3828,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (status)
goto abort_with_ioremap;
- for (i = 0; i < ETH_ALEN; i++)
- netdev->dev_addr[i] = mgp->mac_addr[i];
+ eth_hw_addr_set(netdev, mgp->mac_addr);
myri10ge_select_firmware(mgp);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 3f982033944b..82a22711ce45 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -809,6 +809,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long iosize;
void __iomem *ioaddr;
const int pcibar = 1; /* PCI base address register */
+ u8 addr[ETH_ALEN];
int prev_eedata;
u32 tmp;
@@ -859,10 +860,11 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
prev_eedata = eeprom_read(ioaddr, 6);
for (i = 0; i < 3; i++) {
int eedata = eeprom_read(ioaddr, i + 7);
- dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
- dev->dev_addr[i*2+1] = eedata >> 7;
+ addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ addr[i*2+1] = eedata >> 7;
prev_eedata = eedata;
}
+ eth_hw_addr_set(dev, addr);
np = netdev_priv(dev);
np->ioaddr = ioaddr;
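
The natsemi conversion shows the pattern for drivers that assemble the MAC byte by byte: since dev->dev_addr may no longer be written in place, the bytes are staged in a stack buffer and committed once through eth_hw_addr_set(). A sketch, with a hypothetical EEPROM accessor:

#include <linux/etherdevice.h>

static u16 example_read_eeprom_word(int index)	/* hypothetical */
{
	return 0;
}

static void example_mac_from_eeprom(struct net_device *dev)
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN / 2; i++) {
		u16 word = example_read_eeprom_word(i);

		addr[i * 2] = word & 0xff;
		addr[i * 2 + 1] = word >> 8;
	}

	eth_hw_addr_set(dev, addr);	/* single const-safe commit */
}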
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 72794d158871..49ea130c9067 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1649,9 +1649,11 @@ failed:
return ret;
}
-static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
+static void ns83820_getmac(struct ns83820 *dev, struct net_device *ndev)
{
+ u8 mac[ETH_ALEN];
unsigned i;
+
for (i=0; i<3; i++) {
u32 data;
@@ -1661,9 +1663,10 @@ static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
writel(i*2, dev->base + RFCR);
data = readl(dev->base + RFDR);
- *mac++ = data;
- *mac++ = data >> 8;
+ mac[i * 2] = data;
+ mac[i * 2 + 1] = data >> 8;
}
+ eth_hw_addr_set(ndev, mac);
}
static void ns83820_set_multicast(struct net_device *ndev)
@@ -2136,7 +2139,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
/* Disable Wake On Lan */
writel(0, dev->base + WCSR);
- ns83820_getmac(dev, ndev->dev_addr);
+ ns83820_getmac(dev, ndev);
/* Yes, we support dumb IP checksum on transmit */
ndev->features |= NETIF_F_SG;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 3b6b2e61139e..d1c32c65db05 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5202,7 +5202,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* store the MAC address in CAM */
return do_s2io_prog_unicast(dev, dev->dev_addr);
@@ -5217,7 +5217,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
* as defined in errno.h file on failure.
*/
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
{
struct s2io_nic *sp = netdev_priv(dev);
register u64 mac_addr = 0, perm_addr = 0;
@@ -7954,7 +7954,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
- memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 5a6032212c19..a4266d1544ab 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1073,7 +1073,7 @@ static void s2io_reset(struct s2io_nic * sp);
static int s2io_poll_msix(struct napi_struct *napi, int budget);
static int s2io_poll_inta(struct napi_struct *napi, int budget);
static void s2io_init_pci(struct s2io_nic * sp);
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr);
static void s2io_alarm_handle(struct timer_list *t);
static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index df4a3f3da83a..1969009a91e7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1328,7 +1328,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
}
if (unlikely(!is_vxge_card_up(vdev))) {
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return VXGE_HW_OK;
}
@@ -1341,7 +1341,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
return -EINVAL;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return status;
}
@@ -4663,7 +4663,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Store the fw version for ethtool option */
strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
+ eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 605a1617b195..5d3df28c648f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -305,7 +305,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
index 2473fb5f75e5..2a5cc64227e9 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
struct nfp_alink_stats *old,
- struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_sync *bstats,
struct gnet_stats_queue *qstats)
{
_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
new->tx_pkts - old->tx_pkts);
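
The parameter change from gnet_stats_basic_packed to gnet_stats_basic_sync tracks the qdisc statistics rework toward u64_stats-backed counters; _bstats_update() keeps its (bstats, bytes, packets) shape. A small sketch, assuming only the in-tree helper:

#include <net/sch_generic.h>

/* Sketch: account one batch of completed tx traffic; the deltas are
 * caller-supplied example values.
 */
static void example_count_tx(struct gnet_stats_basic_sync *bstats,
			     u64 bytes, u32 packets)
{
	_bstats_update(bstats, bytes, packets);
}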
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index 36491835ac65..db297ee4d7ad 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -233,13 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf)
if (err <= 0)
return err;
- err = devlink_params_register(devlink, nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
- if (err)
- return err;
-
- devlink_params_publish(devlink);
- return 0;
+ return devlink_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
void nfp_devlink_params_unregister(struct nfp_pf *pf)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index ab70179728f6..dfb4468fe287 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -837,7 +837,7 @@ nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
}
static int
-__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
+__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
struct nfp_tun_mac_addr_offload payload;
@@ -886,7 +886,7 @@ static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
}
static struct nfp_tun_offloaded_mac *
-nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
struct nfp_flower_priv *priv = app->priv;
@@ -1005,7 +1005,7 @@ err_free_ida:
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
- u8 *mac, bool mod)
+ const u8 *mac, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_repr_priv *repr_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index d10a93801344..751f76cd4f79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -55,7 +55,7 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
return;
}
- ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
+ eth_hw_addr_set(netdev, eth_port->mac_addr);
ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}
@@ -701,10 +701,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
- err = devlink_register(devlink);
- if (err)
- goto err_app_clean;
-
err = nfp_shared_buf_register(pf);
if (err)
goto err_devlink_unreg;
@@ -734,6 +730,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_stop_app;
mutex_unlock(&pf->lock);
+ devlink_register(devlink);
return 0;
@@ -751,8 +748,6 @@ err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
- devlink_unregister(devlink);
-err_app_clean:
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
@@ -763,6 +758,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn, *next;
+ devlink_unregister(priv_to_devlink(pf));
mutex_lock(&pf->lock);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
@@ -779,7 +775,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
- devlink_unregister(priv_to_devlink(pf));
nfp_net_pf_free_irqs(pf);
nfp_net_pf_app_clean(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 3b8e675087de..369f6ae700c7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -499,8 +499,7 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
struct nfp_reprs *reprs;
- reprs = kzalloc(sizeof(*reprs) +
- num_reprs * sizeof(struct net_device *), GFP_KERNEL);
+ reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL);
if (!reprs)
return NULL;
reprs->num_reprs = num_reprs;
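
The nfp_reprs allocation switches to struct_size(), the overflow.h helper for structs that end in a flexible array member. A reduced, hypothetical sketch of the same shape:

#include <linux/netdevice.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical model: a count followed by a flexible array */
struct example_reprs {
	unsigned int num;
	struct net_device *reprs[];
};

static struct example_reprs *example_reprs_alloc(unsigned int n)
{
	struct example_reprs *r;

	/* struct_size() computes sizeof(*r) + n * sizeof(r->reprs[0])
	 * and saturates on overflow, so a huge n fails the allocation
	 * rather than undersizing it. r is only used for its type here.
	 */
	r = kzalloc(struct_size(r, reprs, n), GFP_KERNEL);
	if (r)
		r->num = n;
	return r;
}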
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c0e2f4394aef..87f2268b16d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -58,7 +58,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 346145d3180e..cfeb7620ae20 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1283,7 +1283,7 @@ static int nixge_probe(struct platform_device *pdev)
mac_addr = nixge_get_nvmem_address(&pdev->dev);
if (mac_addr && is_valid_ether_addr(mac_addr)) {
- ether_addr_copy(ndev->dev_addr, mac_addr);
+ eth_hw_addr_set(ndev, mac_addr);
kfree(mac_addr);
} else {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ef3fb4cc90af..9b530d7509a4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3175,7 +3175,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
/* synchronized against open : rtnl_lock() held by caller */
- memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, macaddr->sa_data);
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -5711,6 +5711,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
u32 phystate_orig = 0, phystate;
int phyinitialized = 0;
static int printed_version;
+ u8 mac[ETH_ALEN];
if (!printed_version++)
pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
@@ -5884,50 +5885,52 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
txreg = readl(base + NvRegTransmitPoll);
if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
/*
* Set orig mac address back to the reversed version.
* This flag will be cleared during low power transition.
* Therefore, we should always put back the reversed address.
*/
- np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
- (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
- np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
+ np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) +
+ (mac[3] << 16) + (mac[2] << 24);
+ np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8);
} else {
/* need to reverse mac address to correct order */
- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[0] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[1] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[4] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
dev_dbg(&pci_dev->dev,
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(dev, mac);
+ } else {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
dev_err(&pci_dev->dev,
"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
- dev->dev_addr);
+ mac);
eth_hw_addr_random(dev);
dev_err(&pci_dev->dev,
"Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d29fe562b3de..a63cc295b979 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -419,7 +419,7 @@ struct netdata_local {
/*
* MAC support functions
*/
-static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
{
u32 tmp;
@@ -1093,7 +1093,7 @@ static int lpc_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@ -1232,6 +1232,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
dma_addr_t dma_handle;
struct resource *res;
+ u8 addr[ETH_ALEN];
int irq, ret;
/* Setup network interface for RMII or MII mode */
@@ -1347,10 +1348,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */
- __lpc_get_mac(pldat, ndev->dev_addr);
+ __lpc_get_mac(pldat, addr);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- of_get_mac_address(np, ndev->dev_addr);
+ of_get_ethdev_address(np, ndev);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
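
The lpc_eth hunk also swaps of_get_mac_address(np, buf) for of_get_ethdev_address(np, ndev), the OF helper that writes through the net_device rather than into a caller buffer. A sketch of the resulting fallback chain, with a hypothetical hardware accessor:

#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>

static void example_read_hw_mac(u8 *addr)	/* hypothetical */
{
	eth_zero_addr(addr);
}

static void example_init_mac(struct net_device *ndev,
			     struct device_node *np)
{
	u8 addr[ETH_ALEN];

	/* 1) current hardware setting */
	example_read_hw_mac(addr);
	eth_hw_addr_set(ndev, addr);

	/* 2) devicetree property, 3) random fallback */
	if (!is_valid_ether_addr(ndev->dev_addr))
		of_get_ethdev_address(np, ndev);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
}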
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ec3e558f890e..71d234291fc5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2137,7 +2137,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(skaddr->sa_data)) {
ret_val = -EADDRNOTAVAIL;
} else {
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
ret_val = 0;
@@ -2555,7 +2555,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
goto err_free_adapter;
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
/*
* If the MAC is invalid (or just missing), display a warning
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 1a6336a56d3d..9c408328be0d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -592,6 +592,7 @@ static int hamachi_init_one(struct pci_dev *pdev,
void *ring_space;
dma_addr_t ring_dma;
int ret = -ENOMEM;
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -628,8 +629,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
- : readb(ioaddr + StationAddr + i);
+ addr[i] = read_eeprom(ioaddr, 4 + i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version)
if (hamachi_debug > 4)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index f5cd8f51be7c..12105f62cbdd 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -384,6 +384,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
#else
int bar = 1;
#endif
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -416,12 +417,13 @@ static int yellowfin_init_one(struct pci_dev *pdev,
if (drv_flags & DontUseEeprom)
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
+ addr[i] = ioread8(ioaddr + StnAddr + i);
else {
int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ addr[i] = read_eeprom(ioaddr, ee_offset + i);
}
+ eth_hw_addr_set(dev, addr);
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 7e096b2888b9..f0ace3a0e85c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -221,7 +221,7 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
adr0 = dev->dev_addr[2] << 24 |
dev->dev_addr[3] << 16 |
@@ -1722,7 +1722,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENODEV;
goto out;
}
- memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+ eth_hw_addr_set(dev, mac->mac_addr);
ret = mac_to_intf(mac);
if (ret < 0) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 66204106f83e..5e25411ff02f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -19,6 +19,7 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
#define DEVCMD_TIMEOUT 10
+#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
#define NORMAL_PPB 1000000000 /* one billion parts per billion */
@@ -69,8 +70,13 @@ struct ionic_admin_ctx {
};
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err);
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg);
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err);
+
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 39f59849720d..c58217027564 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -143,8 +143,6 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
- debugfs_create_u64("stop", 0400, q_dentry, &q->stop);
- debugfs_create_u64("wake", 0400, q_dentry, &q->wake);
debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);
@@ -228,6 +226,50 @@ static int netdev_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(netdev);
+static int lif_filters_show(struct seq_file *seq, void *v)
+{
+ struct ionic_lif *lif = seq->private;
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ seq_puts(seq, "id flow state type filter\n");
+ spin_lock_bh(&lif->rx_filters.lock);
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ head = &lif->rx_filters.by_id[i];
+ hlist_for_each_entry_safe(f, tmp, head, by_id) {
+ switch (le16_to_cpu(f->cmd.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x vlan 0x%04x\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan));
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x mac %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x macvl 0x%04x %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan),
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_STEER_PKTCLASS:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x rxstr 0x%llx\n",
+ f->filter_id, f->flow_id, f->state,
+ le64_to_cpu(f->cmd.pkt_class));
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(lif_filters);
+
void ionic_debugfs_add_lif(struct ionic_lif *lif)
{
struct dentry *lif_dentry;
@@ -239,6 +281,8 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif)
debugfs_create_file("netdev", 0400, lif->dentry,
lif->netdev, &netdev_fops);
+ debugfs_create_file("filters", 0400, lif->dentry,
+ lif, &lif_filters_fops);
}
void ionic_debugfs_del_lif(struct ionic_lif *lif)
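
The new lif_filters_show()/lif_filters_fops pair is the standard seq_file single-show pattern: the *_show() function reads its object from seq->private, DEFINE_SHOW_ATTRIBUTE() generates the file_operations, and debugfs_create_file() binds the private pointer. A stripped-down sketch with illustrative names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *seq, void *v)
{
	/* seq->private is the data passed to debugfs_create_file() */
	seq_printf(seq, "private pointer: %p\n", seq->private);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);

static void example_add_file(struct dentry *parent, void *priv)
{
	debugfs_create_file("example", 0400, parent, priv,
			    &example_fops);
}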
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 0d6858ab511c..d57e80d44c9d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -581,7 +581,6 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
- DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 8311086fb1f4..e5acf3bd62b2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -220,9 +220,6 @@ struct ionic_queue {
unsigned int num_descs;
unsigned int max_sg_elems;
u64 features;
- u64 dbell_count;
- u64 stop;
- u64 wake;
u64 drop;
struct ionic_dev *idev;
unsigned int type;
@@ -269,7 +266,6 @@ struct ionic_cq {
bool done_color;
unsigned int num_descs;
unsigned int desc_size;
- u64 compl_count;
void *base;
dma_addr_t base_pa;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index c7d0e195d176..4297ed9024c0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -82,22 +82,16 @@ int ionic_devlink_register(struct ionic *ionic)
struct devlink_port_attrs attrs = {};
int err;
- err = devlink_register(dl);
- if (err) {
- dev_warn(ionic->dev, "devlink_register failed: %d\n", err);
- return err;
- }
-
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(&ionic->dl_port, &attrs);
err = devlink_port_register(dl, &ionic->dl_port, 0);
if (err) {
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
- devlink_unregister(dl);
return err;
}
devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
+ devlink_register(dl);
return 0;
}
@@ -105,6 +99,6 @@ void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
+ devlink_port_unregister(&ionic->dl_port);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3de1a03839e2..6b45cae39a20 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -11,13 +11,6 @@
#include "ionic_ethtool.h"
#include "ionic_stats.h"
-static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
- "sw-dbg-stats",
-};
-
-#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
-
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
u32 i;
@@ -59,9 +52,6 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_STATS:
count = ionic_get_stats_count(lif);
break;
- case ETH_SS_PRIV_FLAGS:
- count = IONIC_PRIV_FLAGS_COUNT;
- break;
}
return count;
}
@@ -75,10 +65,6 @@ static void ionic_get_strings(struct net_device *netdev,
case ETH_SS_STATS:
ionic_get_stats_strings(lif, buf);
break;
- case ETH_SS_PRIV_FLAGS:
- memcpy(buf, ionic_priv_flags_strings,
- IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
- break;
}
}
@@ -691,28 +677,6 @@ static int ionic_set_channels(struct net_device *netdev,
return err;
}
-static u32 ionic_get_priv_flags(struct net_device *netdev)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
-
- return priv_flags;
-}
-
-static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
-
- clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
- set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
-
- return 0;
-}
-
static int ionic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -1013,8 +977,6 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_strings = ionic_get_strings,
.get_ethtool_stats = ionic_get_stats,
.get_sset_count = ionic_get_sset_count,
- .get_priv_flags = ionic_get_priv_flags,
- .set_priv_flags = ionic_set_priv_flags,
.get_rxnfc = ionic_get_rxnfc,
.get_rxfh_indir_size = ionic_get_rxfh_indir_size,
.get_rxfh_key_size = ionic_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7f3322ce044c..63f8a8163b5f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -287,11 +287,9 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
+static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
struct ionic_queue *q;
- struct ionic_lif *lif;
- int err = 0;
struct ionic_admin_ctx ctx = {
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
@@ -301,11 +299,12 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
},
};
- if (!qcq)
+ if (!qcq) {
+ netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
return -ENXIO;
+ }
q = &qcq->q;
- lif = q->lif;
if (qcq->flags & IONIC_QCQ_F_INTR) {
struct ionic_dev *idev = &lif->ionic->idev;
@@ -318,17 +317,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
napi_disable(&qcq->napi);
}
- if (send_to_hw) {
- ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
- ctx.cmd.q_control.type = q->type;
- ctx.cmd.q_control.index = cpu_to_le32(q->index);
- dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
- ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ /* If there was a previous fw communication error, don't bother with
+ * sending the adminq command and just return the same error value.
+ */
+ if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
+ return fw_err;
- err = ionic_adminq_post_wait(lif, &ctx);
- }
+ ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
+ ctx.cmd.q_control.type = q->type;
+ ctx.cmd.q_control.index = cpu_to_le32(q->index);
+ dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
- return err;
+ return ionic_adminq_post_wait(lif, &ctx);
}
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -1241,137 +1242,6 @@ void ionic_get_stats64(struct net_device *netdev,
ns->tx_errors = ns->tx_aborted_errors;
}
-int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
- },
- };
- int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- bool mc = is_multicast_ether_addr(addr);
- struct ionic_rx_filter *f;
- int err = 0;
-
- memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f) {
- /* don't bother if we already have it and it is sync'd */
- if (f->state == IONIC_FILTER_STATE_SYNCED) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return 0;
- }
-
- /* mark preemptively as sync'd to block any parallel attempts */
- f->state = IONIC_FILTER_STATE_SYNCED;
- } else {
- /* save as SYNCED to catch any DEL requests while processing */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
- spin_unlock_bh(&lif->rx_filters.lock);
- if (err)
- return err;
-
- netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
-
- /* Don't bother with the write to FW if we know there's no room,
- * we can try again on the next sync attempt.
- */
- if ((lif->nucast + lif->nmcast) >= nfilters)
- err = -ENOSPC;
- else
- err = ionic_adminq_post_wait(lif, &ctx);
-
- spin_lock_bh(&lif->rx_filters.lock);
- if (err && err != -EEXIST) {
- /* set the state back to NEW so we can try again later */
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
- f->state = IONIC_FILTER_STATE_NEW;
- set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
- }
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (err == -ENOSPC)
- return 0;
- else
- return err;
- }
-
- if (mc)
- lif->nmcast++;
- else
- lif->nucast++;
-
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_OLD) {
- /* Someone requested a delete while we were adding
- * so update the filter info with the results from the add
- * and the data will be there for the delete on the next
- * sync cycle.
- */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_OLD);
- } else {
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- return err;
-}
-
-int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
- int state;
- int err;
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
-
- netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
- addr, f->filter_id);
-
- state = f->state;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
-
- if (is_multicast_ether_addr(addr) && lif->nmcast)
- lif->nmcast--;
- else if (!is_multicast_ether_addr(addr) && lif->nucast)
- lif->nucast--;
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (state != IONIC_FILTER_STATE_NEW) {
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err && err != -EEXIST)
- return err;
- }
-
- return 0;
-}
-
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
@@ -1407,7 +1277,7 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
- /* sync the mac filters */
+ /* sync the filters */
ionic_rx_filter_sync(lif);
/* check for overflow state
@@ -1417,14 +1287,12 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
* to see if we can disable NIC PROMISC
*/
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- if ((lif->nucast + lif->nmcast) >= nfilters) {
+
+ if (((lif->nucast + lif->nmcast) >= nfilters) ||
+ (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
- lif->uc_overflow = true;
- lif->mc_overflow = true;
- } else if (lif->uc_overflow) {
- lif->uc_overflow = false;
- lif->mc_overflow = false;
+ } else {
if (!(nd_flags & IFF_PROMISC))
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
if (!(nd_flags & IFF_ALLMULTI))
@@ -1809,59 +1677,30 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
- .vlan.vlan = cpu_to_le16(vid),
- },
- };
int err;
- netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
- err = ionic_adminq_post_wait(lif, &ctx);
+ err = ionic_lif_vlan_add(lif, vid);
if (err)
return err;
- spin_lock_bh(&lif->rx_filters.lock);
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return err;
+ return 0;
}
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
-
- spin_lock_bh(&lif->rx_filters.lock);
-
- f = ionic_rx_filter_by_vlan(lif, vid);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
+ int err;
- netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
- vid, f->filter_id);
+ err = ionic_lif_vlan_del(lif, vid);
+ if (err)
+ return err;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
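
For context, these two functions are the handlers a driver registers for the 8021q offload hooks; a hedged sketch of the wiring (the driver's actual ops table is outside this hunk):

	static const struct net_device_ops ionic_netdev_ops = {
		.ndo_vlan_rx_add_vid	= ionic_vlan_rx_add_vid,
		.ndo_vlan_rx_kill_vid	= ionic_vlan_rx_kill_vid,
		/* ... remaining ops elided ... */
	};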
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
@@ -1953,19 +1792,19 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
}
if (lif->hwstamp_txq)
- err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
}
if (lif->hwstamp_rxq)
- err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
ionic_lif_quiesce(lif);
}
@@ -2165,7 +2004,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
- derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
goto err_out;
}
}
@@ -2187,13 +2026,13 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out_hwstamp_tx:
if (lif->hwstamp_rxq)
- derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
err_out_hwstamp_rx:
i = lif->nxqs;
err_out:
while (i--) {
- derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
- derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
return err;
@@ -2896,6 +2735,9 @@ int ionic_lif_alloc(struct ionic *ionic)
snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
+ mutex_init(&lif->queue_lock);
+ mutex_init(&lif->config_lock);
+
spin_lock_init(&lif->adminq_lock);
spin_lock_init(&lif->deferred.lock);
@@ -2909,7 +2751,7 @@ int ionic_lif_alloc(struct ionic *ionic)
if (!lif->info) {
dev_err(dev, "Failed to allocate lif info, aborting\n");
err = -ENOMEM;
- goto err_out_free_netdev;
+ goto err_out_free_mutex;
}
ionic_debugfs_add_lif(lif);
@@ -2944,6 +2786,9 @@ err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
lif->info_pa = 0;
+err_out_free_mutex:
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
free_netdev(lif->netdev);
lif = NULL;
@@ -2974,11 +2819,10 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
- mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
- mutex_unlock(&lif->queue_lock);
}
if (netif_running(lif->netdev)) {
@@ -2989,6 +2833,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_reset(ionic);
ionic_qcqs_free(lif);
+ mutex_unlock(&lif->queue_lock);
+
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
@@ -3012,9 +2858,12 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err = ionic_port_init(ionic);
if (err)
goto err_out;
+
+ mutex_lock(&lif->queue_lock);
+
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out;
+ goto err_unlock;
err = ionic_lif_init(lif);
if (err)
@@ -3035,6 +2884,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
goto err_txrx_free;
}
+ mutex_unlock(&lif->queue_lock);
+
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
@@ -3051,6 +2902,8 @@ err_lifs_deinit:
ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
+err_unlock:
+ mutex_unlock(&lif->queue_lock);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
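
Net effect of the locking rework: queue_lock now brackets the entire teardown and restart rather than just ionic_stop_queues(), so a racing queue reconfigure cannot observe half-freed qcqs. The widened fw_down critical section, condensed to a sketch (assumes, per this patch, that all qcq alloc/free paths also take queue_lock):

	mutex_lock(&lif->queue_lock);
	ionic_stop_queues(lif);
	/* netdev stop path tears down tx/rx here */
	ionic_qcqs_free(lif);
	mutex_unlock(&lif->queue_lock);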
@@ -3084,6 +2937,9 @@ void ionic_lif_free(struct ionic_lif *lif)
kfree(lif->dbid_inuse);
lif->dbid_inuse = NULL;
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
+
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
free_netdev(lif->netdev);
@@ -3106,8 +2962,6 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
- mutex_destroy(&lif->config_lock);
- mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -3273,8 +3127,6 @@ int ionic_lif_init(struct ionic_lif *lif)
return err;
lif->hw_index = le16_to_cpu(comp.hw_index);
- mutex_init(&lif->queue_lock);
- mutex_init(&lif->config_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 4915184f3efb..9f7ab2f17f93 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -14,9 +14,6 @@
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
-#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
-#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
-
#define ADD_ADDR true
#define DEL_ADDR false
#define CAN_SLEEP true
@@ -37,7 +34,6 @@ struct ionic_tx_stats {
u64 clean;
u64 linearize;
u64 crc32_csum;
- u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
@@ -48,7 +44,6 @@ struct ionic_rx_stats {
u64 bytes;
u64 csum_none;
u64 csum_complete;
- u64 buffers_posted;
u64 dropped;
u64 vlan_stripped;
u64 csum_error;
@@ -65,11 +60,6 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
-struct ionic_napi_stats {
- u64 poll_count;
- u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
-};
-
struct ionic_qcq {
void *q_base;
dma_addr_t q_base_pa;
@@ -85,7 +75,6 @@ struct ionic_qcq {
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
- struct ionic_napi_stats napi_stats;
unsigned int flags;
struct dentry *dentry;
};
@@ -142,7 +131,6 @@ struct ionic_lif_sw_stats {
enum ionic_lif_state_flags {
IONIC_LIF_F_INITED,
- IONIC_LIF_F_SW_DEBUG_STATS,
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FILTER_SYNC_NEEDED,
@@ -201,11 +189,11 @@ struct ionic_lif {
u16 rx_mode;
u64 hw_features;
bool registered;
- bool mc_overflow;
- bool uc_overflow;
u16 lif_type;
unsigned int nmcast;
unsigned int nucast;
+ unsigned int nvlans;
+ unsigned int max_vlans;
char name[IONIC_LIF_NAME_MAX_SZ];
union ionic_lif_identity *identity;
@@ -350,37 +338,4 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
void ionic_lif_rx_mode(struct ionic_lif *lif);
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam);
-
-static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
-{
- struct ionic_txq_desc *desc = &q->txq[q->head_idx];
- u8 num_sg_elems;
-
- q->dbell_count += dbell;
-
- num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
- & IONIC_TXQ_DESC_NSGE_MASK);
- if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
- num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
-
- q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
-}
-
-static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
- unsigned int work_done)
-{
- qcq->napi_stats.poll_count++;
-
- if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1))
- work_done = IONIC_MAX_NUM_NAPI_CNTR - 1;
-
- qcq->napi_stats.work_done_cntr[work_done]++;
-}
-
-#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
-#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
-#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
-#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
- debug_stats_napi_poll(qcq, work_done)
-
#endif /* _IONIC_LIF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 6f07bf509efe..875f4ec42efe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>
+#include <linux/ctype.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -211,24 +212,28 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
}
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err)
+{
+ netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(status), err);
+}
+
static int ionic_adminq_check_err(struct ionic_lif *lif,
struct ionic_admin_ctx *ctx,
- bool timeout)
+ const bool timeout,
+ const bool do_msg)
{
- struct net_device *netdev = lif->netdev;
- const char *opcode_str;
- const char *status_str;
int err = 0;
if (ctx->comp.comp.status || timeout) {
- opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
- status_str = ionic_error_to_str(ctx->comp.comp.status);
err = timeout ? -ETIMEDOUT :
ionic_error_to_errno(ctx->comp.comp.status);
- netdev_err(netdev, "%s (%d) failed: %s (%d)\n",
- opcode_str, ctx->cmd.cmd.opcode,
- timeout ? "TIMEOUT" : status_str, err);
+ if (do_msg)
+ ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode,
+ ctx->comp.comp.status, err);
if (timeout)
ionic_adminq_flush(lif);
@@ -297,24 +302,52 @@ err_out:
return err;
}
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err)
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg)
{
struct net_device *netdev = lif->netdev;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
unsigned long remaining;
const char *name;
+ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+
if (err) {
- if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
- name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
name, ctx->cmd.cmd.opcode, err);
- }
return err;
}
- remaining = wait_for_completion_timeout(&ctx->work,
- HZ * (ulong)DEVCMD_TIMEOUT);
- return ionic_adminq_check_err(lif, ctx, (remaining == 0));
+ time_start = jiffies;
+ time_limit = time_start + HZ * (ulong)DEVCMD_TIMEOUT;
+ do {
+ remaining = wait_for_completion_timeout(&ctx->work,
+ IONIC_ADMINQ_TIME_SLICE);
+
+ /* check for done */
+ if (remaining)
+ break;
+
+ /* interrupt the wait if FW stopped */
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ if (do_msg)
+ netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
+ name, ctx->cmd.cmd.opcode);
+ return -ENXIO;
+ }
+
+ } while (time_before(jiffies, time_limit));
+ time_done = jiffies;
+
+ dev_dbg(lif->ionic->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+ return ionic_adminq_check_err(lif, ctx,
+ time_after_eq(time_done, time_limit),
+ do_msg);
}
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
@@ -323,7 +356,16 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
err = ionic_adminq_post(lif, ctx);
- return ionic_adminq_wait(lif, ctx, err);
+ return ionic_adminq_wait(lif, ctx, err, true);
+}
+
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ int err;
+
+ err = ionic_adminq_post(lif, ctx);
+
+ return ionic_adminq_wait(lif, ctx, err, false);
}
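
The sliced wait above generalizes to any "block, but bail out early when a flag flips" situation; a self-contained sketch of the pattern (names illustrative; IONIC_ADMINQ_TIME_SLICE is defined elsewhere in the driver):

	/* Wait in short slices so a dead-FW flag can interrupt the sleep. */
	unsigned long limit = jiffies + HZ * timeout_secs;

	do {
		if (wait_for_completion_timeout(&done, slice_jiffies))
			return 0;		/* command completed */
		if (test_bit(STOP_FLAG, state))
			return -ENXIO;		/* FW went away mid-wait */
	} while (time_before(jiffies, limit));

	return -ETIMEDOUT;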
static void ionic_dev_cmd_clean(struct ionic *ionic)
@@ -450,13 +492,23 @@ int ionic_identify(struct ionic *ionic)
}
mutex_unlock(&ionic->dev_cmd_lock);
- dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version);
-
if (err) {
- dev_err(ionic->dev, "Cannot identify ionic: %dn", err);
+ dev_err(ionic->dev, "Cannot identify ionic: %d\n", err);
goto err_out;
}
+ if (isprint(idev->dev_info.fw_version[0]) &&
+ isascii(idev->dev_info.fw_version[0]))
+ dev_info(ionic->dev, "FW: %.*s\n",
+ (int)(sizeof(idev->dev_info.fw_version) - 1),
+ idev->dev_info.fw_version);
+ else
+ dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
+ (u8)idev->dev_info.fw_version[0],
+ (u8)idev->dev_info.fw_version[1],
+ (u8)idev->dev_info.fw_version[2],
+ (u8)idev->dev_info.fw_version[3]);
+
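+
The %.*s bound matters because fw_version is a fixed-size character array that may arrive without a NUL terminator; a tiny illustration with a hypothetical buffer:

	char v[8] = { 'v', '1', '.', '2', '3', '.', '4', '5' };	/* no '\0' */

	/* prints at most sizeof(v) - 1 bytes; never reads past the array */
	pr_info("FW: %.*s\n", (int)(sizeof(v) - 1), v);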
err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
&ionic->ident.lif);
if (err) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index eed2db69d708..887046838b3b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -348,7 +348,7 @@ static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
@@ -373,7 +373,7 @@ static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_settime64(struct ptp_clock_info *info,
@@ -402,7 +402,7 @@ static int ionic_phc_settime64(struct ptp_clock_info *info,
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_gettimex64(struct ptp_clock_info *info,
@@ -459,7 +459,7 @@ static long ionic_phc_aux_work(struct ptp_clock_info *info)
spin_unlock_irqrestore(&phc->lock, irqflags);
- ionic_adminq_wait(phc->lif, &ctx, err);
+ ionic_adminq_wait(phc->lif, &ctx, err, true);
return phc->aux_work_delay;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 69728f9013cb..f6e785f949f9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -239,6 +239,21 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
return NULL;
}
+static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
+ case IONIC_RX_FILTER_MATCH_MAC:
+ return ionic_rx_filter_by_addr(lif, ac->mac.addr);
+ default:
+ netdev_err(lif->netdev, "unsupported filter match %d\n",
+ le16_to_cpu(ac->match));
+ return NULL;
+ }
+}
+
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
struct ionic_rx_filter *f;
@@ -286,6 +301,228 @@ int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
return 0;
}
+static int ionic_lif_filter_add(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ };
+ struct ionic_rx_filter *f;
+ int nfilters;
+ int err = 0;
+
+ ctx.cmd.rx_filter_add = *ac;
+ ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
+ ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f) {
+ /* don't bother if we already have it and it is sync'd */
+ if (f->state == IONIC_FILTER_STATE_SYNCED) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return 0;
+ }
+
+ /* mark preemptively as sync'd to block any parallel attempts */
+ f->state = IONIC_FILTER_STATE_SYNCED;
+ } else {
+ /* save as SYNCED to catch any DEL requests while processing */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+ if (err)
+ return err;
+
+ /* Don't bother with the write to FW if we know there's no room;
+ * we can try again on the next sync attempt.
+ * Since the FW doesn't have a way to tell us the vlan limit,
+ * we start max_vlans at 0 until we hit the ENOSPC error.
+ */
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n",
+ __func__, le16_to_cpu(ctx.cmd.rx_filter_add.vlan.vlan));
+ if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
+ err = -ENOSPC;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n",
+ __func__, ctx.cmd.rx_filter_add.mac.addr);
+ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
+ if ((lif->nucast + lif->nmcast) >= nfilters)
+ err = -ENOSPC;
+ break;
+ }
+
+ if (err != -ENOSPC)
+ err = ionic_adminq_post_wait_nomsg(lif, &ctx);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+
+ if (err && err != -EEXIST) {
+ /* set the state back to NEW so we can try again later */
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
+ f->state = IONIC_FILTER_STATE_NEW;
+
+ /* If -ENOSPC we won't waste time trying to sync again
+ * until there is a delete that might make room
+ */
+ if (err != -ENOSPC)
+ set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (err == -ENOSPC) {
+ if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
+ lif->max_vlans = lif->nvlans;
+ return 0;
+ }
+
+ ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
+ ctx.comp.comp.status, err);
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n",
+ le16_to_cpu(ctx.cmd.rx_filter_add.vlan.vlan));
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n",
+ ctx.cmd.rx_filter_add.mac.addr);
+ break;
+ }
+
+ return err;
+ }
+
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ lif->nvlans++;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr))
+ lif->nmcast++;
+ else
+ lif->nucast++;
+ break;
+ }
+
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_OLD) {
+ /* Someone requested a delete while we were adding
+ * so update the filter info with the results from the add
+ * and the data will be there for the delete on the next
+ * sync cycle.
+ */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_OLD);
+ } else {
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return err;
+}
+
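
Worth noting the design choice buried in the -ENOSPC handling above: the device never advertises its VLAN filter capacity, so the driver learns it empirically and caches the high-water mark. Condensed to a sketch (post_filter_add() is a hypothetical stand-in for the adminq post):

	if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
		return 0;			/* known-full: skip the FW round trip */

	err = post_filter_add(lif);
	if (err == -ENOSPC)
		lif->max_vlans = lif->nvlans;	/* learn the limit from the device */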
+int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+static int ionic_lif_filter_del(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ struct ionic_rx_filter *f;
+ int state;
+ int err;
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, ac);
+ if (!f) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return -ENOENT;
+ }
+
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n",
+ __func__, le16_to_cpu(ac->vlan.vlan), f->filter_id);
+ lif->nvlans--;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n",
+ __func__, ac->mac.addr, f->filter_id);
+ if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast)
+ lif->nmcast--;
+ else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast)
+ lif->nucast--;
+ break;
+ }
+
+ state = f->state;
+ ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
+ ionic_rx_filter_free(lif, f);
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (state != IONIC_FILTER_STATE_NEW) {
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err && err != -EEXIST)
+ return err;
+ }
+
+ return 0;
+}
+
+int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
struct sync_item {
struct list_head list;
struct ionic_rx_filter f;
@@ -340,14 +577,14 @@ loop_out:
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
- (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);
+ (void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
- (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
+ (void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index a66e35f0833b..87b2666f248b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -44,5 +44,7 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif);
void ionic_rx_filter_sync(struct ionic_lif *lif);
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode);
int ionic_rx_filters_need_sync(struct ionic_lif *lif);
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid);
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid);
#endif /* _IONIC_RX_FILTER_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index c14de5fcedea..fd6806b4a1b9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -151,33 +151,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(vlan_stripped),
};
-static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
- IONIC_TX_Q_STAT_DESC(stop),
- IONIC_TX_Q_STAT_DESC(wake),
- IONIC_TX_Q_STAT_DESC(drop),
- IONIC_TX_Q_STAT_DESC(dbell_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = {
- IONIC_CQ_STAT_DESC(compl_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = {
- IONIC_INTR_STAT_DESC(rearm_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
- IONIC_NAPI_STAT_DESC(poll_count),
-};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
-#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
-#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc)
-#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc)
-#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc)
#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues)
@@ -253,21 +231,6 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
total += tx_queues * IONIC_NUM_TX_STATS;
total += rx_queues * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- /* tx debug stats */
- total += tx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_TX_Q_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_MAX_NUM_SG_CNTR);
-
- /* rx debug stats */
- total += rx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_NUM_DBG_NAPI_STATS +
- IONIC_MAX_NUM_NAPI_CNTR);
- }
-
return total;
}
@@ -279,22 +242,6 @@ static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_TX_STATS; i++)
ethtool_sprintf(buf, "tx_%d_%s", q_num,
ionic_tx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_%s", q_num,
- ionic_txq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++)
- ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i);
}
static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
@@ -305,22 +252,6 @@ static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_RX_STATS; i++)
ethtool_sprintf(buf, "rx_%d_%s", q_num,
ionic_rx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num,
- ionic_dbg_napi_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i);
}
static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
@@ -350,7 +281,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_tx_stats *txstats;
- struct ionic_qcq *txqcq;
int i;
txstats = &lif->txqstats[q_num];
@@ -359,38 +289,12 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- txqcq = lif->txqcqs[q_num];
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->q,
- &ionic_txq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txstats->sg_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_rx_stats *rxstats;
- struct ionic_qcq *rxqcq;
int i;
rxstats = &lif->rxqstats[q_num];
@@ -399,31 +303,6 @@ static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- rxqcq = lif->rxqcqs[q_num];
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
- &ionic_dbg_napi_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- **buf = rxqcq->napi_stats.work_done_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 37c39581b659..94384f5d2a22 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -14,8 +14,6 @@
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
- DEBUG_STATS_TXQ_POST(q, ring_dbell);
-
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
@@ -23,8 +21,6 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
-
- DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -507,8 +503,6 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -546,8 +540,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -591,9 +583,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
- DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
-
return rx_work_done;
}
@@ -735,7 +724,6 @@ static void ionic_tx_clean(struct ionic_queue *q,
} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
netif_wake_subqueue(q->lif->netdev, qi);
- q->wake++;
}
desc_info->bytes = skb->len;
@@ -1174,7 +1162,6 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
if (unlikely(!ionic_q_has_space(q, ndescs))) {
netif_stop_subqueue(q->lif->netdev, q->index);
- q->stop++;
stopped = 1;
/* Might race with ionic_tx_clean, check again */
@@ -1269,7 +1256,6 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
err_out_drop:
- q->stop++;
q->drop++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 344ea1143454..4cfab4434e80 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -463,6 +463,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 addr[ETH_ALEN];
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
@@ -474,7 +475,8 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
p = (unsigned char *)&mac_addr;
for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = *(p + 5 - i);
+ addr[i] = *(p + 5 - i);
+ eth_hw_addr_set(netdev, addr);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
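
This netxen change follows the tree-wide conversion to a const netdev->dev_addr: drivers no longer write the bytes in place but stage them in a local buffer and publish via a helper. The shape of the conversion, as a sketch:

	u8 addr[ETH_ALEN];

	/* build the address in a local buffer (byte-reversed here), then: */
	eth_hw_addr_set(netdev, addr);	/* replaces memcpy(netdev->dev_addr, ...) */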
@@ -500,7 +502,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p)
}
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
adapter->macaddr_set(adapter, addr->sa_data);
if (netif_running(netdev)) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d58e021614cd..d613095b78e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,6 +23,8 @@
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
+#include "qed_mfw_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
@@ -89,14 +91,14 @@ static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
- ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
~((1 << (p_hwfn->cdev->cache_shift)) - 1))
-#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
- (val == (cond1) ? true1 : \
- (val == (cond2) ? true2 : def))
+ ((val) == (cond1) ? true1 : \
+ ((val) == (cond2) ? true2 : def))
/* forward */
struct qed_ptt_pool;
@@ -510,7 +512,7 @@ enum qed_hsi_def_type {
struct qed_simd_fp_handler {
void *token;
- void (*func)(void *);
+ void (*func)(void *cookie);
};
enum qed_slowpath_wq_flag {
@@ -703,8 +705,6 @@ struct qed_dev {
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
-#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5)
u16 vendor_id;
@@ -875,14 +875,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_BTB_BLOCKS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
-
/**
- * @brief qed_concrete_to_sw_fid - get the sw function id from
- * the concrete value.
+ * qed_concrete_to_sw_fid(): Get the sw function id from
+ * the concrete value.
*
- * @param concrete_fid
+ * @cdev: Qed dev pointer.
+ * @concrete_fid: Concrete fid.
*
- * @return inline u8
+ * Return: inline u8.
*/
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
@@ -902,7 +902,6 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
}
#define PKT_LB_TC 9
-#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
@@ -914,7 +913,7 @@ int qed_device_num_engines(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
-#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0])
#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin])
@@ -935,7 +934,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
#define PQ_FLAGS_LLT (BIT(7))
#define PQ_FLAGS_MTC (BIT(8))
-/* physical queue index for cm context intialization */
+/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
@@ -947,12 +946,18 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
+#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx)))
+
+#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
+
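+
Because GET_GTT_REG_ADDR token-pastes its __offset argument onto _GTT_OFFSET, the second argument must be a register-name prefix, not a runtime value. A worked expansion (TSDM_REG_QUEUE is a hypothetical register name):

	/* GET_GTT_REG_ADDR(base, TSDM_REG_QUEUE, 3)
	 *	=> (base) + TSDM_REG_QUEUE_GTT_OFFSET((3))
	 */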
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
-#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
- (cdev->regview) + \
- (offset))
+#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\
+ ((cdev)->regview) + \
+ (offset)))
#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
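
The added parentheses throughout this hunk are classic macro hygiene: an unparenthesized parameter misparses when the caller passes an expression. Using for_each_hwfn() from earlier in this header as the example:

	/* old body:  for (i = 0; i < cdev->num_hwfns; i++)
	 *
	 * for_each_hwfn(sel ? cdev0 : cdev1, i) would expand to
	 *	i < sel ? cdev0 : cdev1->num_hwfns
	 * which C parses as
	 *	(i < sel) ? cdev0 : (cdev1->num_hwfns)
	 * The (cdev)->num_hwfns form keeps the argument atomic.
	 */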
@@ -960,7 +965,7 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
#define DOORBELL(cdev, db_addr, val) \
writel((u32)val, (void __iomem *)((u8 __iomem *)\
- (cdev->doorbells) + (db_addr)))
+ ((cdev)->doorbells) + (db_addr)))
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
qed_device_num_ports((_p_hwfn)->cdev))
@@ -998,4 +1003,5 @@ int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_clear_all_filters(struct qed_dev *cdev);
+unsigned long qed_get_epoch_time(void);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index cb0f2a3a1ac9..452494f8c298 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -54,22 +54,22 @@
/* connection context union */
union conn_context {
- struct e4_core_conn_context core_ctx;
- struct e4_eth_conn_context eth_ctx;
- struct e4_iscsi_conn_context iscsi_ctx;
- struct e4_fcoe_conn_context fcoe_ctx;
- struct e4_roce_conn_context roce_ctx;
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct fcoe_conn_context fcoe_ctx;
+ struct roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
- struct e4_iscsi_task_context iscsi_ctx;
- struct e4_fcoe_task_context fcoe_ctx;
+ struct iscsi_task_context iscsi_ctx;
+ struct fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
- struct e4_rdma_task_context roce_ctx;
+ struct rdma_task_context roce_ctx;
};
struct src_ent {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8adb7ed0c12d..168ce2c50385 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -28,24 +28,23 @@ struct qed_tid_mem {
};
/**
- * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
+ * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
*
+ * @p_hwfn: HW device data.
+ * @p_info: In/out.
*
- * @param p_hwfn
- * @param p_info in/out
- *
- * @return int
+ * Return: Int.
*/
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
/**
- * @brief qed_cxt_get_tid_mem_info
+ * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
*
- * @param p_hwfn
- * @param p_info
+ * @p_hwfn: HW device data.
+ * @p_info: in/out.
*
- * @return int
+ * Return: int.
*/
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
struct qed_tid_mem *p_info);
@@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
/**
- * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ * qed_cxt_set_pf_params(): Set the PF params for cxt init.
+ *
+ * @p_hwfn: HW device data.
+ * @rdma_tasks: Requested maximum.
*
- * @param p_hwfn
- * @param rdma_tasks - requested maximum
- * @return int
+ * Return: int.
*/
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
/**
- * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
*
- * @param p_hwfn
- * @param last_line
+ * @p_hwfn: HW device data.
+ * @last_line: Last_line.
*
- * @return int
+ * Return: Int
*/
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
/**
- * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased.
+ *
+ * @p_hwfn: HW device data.
+ * @used_lines: Used lines.
*
- * @param p_hwfn
- * @param used_lines
+ * Return: Int.
*/
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
- * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_free
+ * qed_cxt_mngr_free() - Context manager free.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ * qed_cxt_mngr_setup(): Reset the acquired CIDs.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*/
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path.
- *
+ * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
+ * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief qed_qm_init_pf - Initailze the QM PF phase, per path
+ * qed_qm_init_pf(): Initialize the QM PF phase, per path.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @is_pf_loading: True while the PF is still loading.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_pf_loading
+ * Return: Void.
*/
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool is_pf_loading);
/**
- * @brief Reconfigures QM pf on the fly
+ * qed_qm_reconf(): Reconfigures QM pf on the fly.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_CXT_PF_CID (0xff)
/**
- * @brief qed_cxt_release - Release a cid
+ * qed_cxt_release_cid(): Release a cid.
*
- * @param p_hwfn
- * @param cid
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ *
+ * Return: Void.
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
/**
- * @brief qed_cxt_release - Release a cid belonging to a vf-queue
+ * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
+ *
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @param p_hwfn
- * @param cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * Return: Void.
*/
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
/**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid);
/**
- * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type
- * for a vf-queue
+ * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
+ * for a vf-queue.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @return int
+ * Return: Int.
*/
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid, u8 vfid);
@@ -334,7 +346,10 @@ struct qed_cxt_mngr {
/* Maximal number of L2 steering filters */
u32 arfs_count;
- u8 task_type_id;
+ u16 iscsi_task_pages;
+ u16 fcoe_task_pages;
+ u16 roce_task_pages;
+ u16 eth_task_pages;
u16 task_ctx_size;
u16 conn_ctx_size;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
new file mode 100644
index 000000000000..9d5a0c9e1ca0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+#ifndef _QED_DBG_HSI_H
+#define _QED_DBG_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_MSTAT,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
+ BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
+ MAX_BLOCK_ID
+};
+
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ u16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
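+
These MASK/SHIFT pairs are consumed through the driver's GET_FIELD() helper; a sketch of a field extraction (GET_FIELD reproduced here for illustration; it lives in the main HSI header):

	#define GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)

	struct dbg_attn_bit_mapping m = { .data = 0x8005 };
	u16 val    = GET_FIELD(m.data, DBG_ATTN_BIT_MAPPING_VAL);	/* 0x0005 */
	u16 unused = GET_FIELD(m.data, DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT);	/* 1 */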
+
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ u16 names_offset;
+ u16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ u16 regs_offset;
+};
+
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+/* Attention register result */
+struct dbg_attn_reg_result {
+ u32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val;
+ u32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ u16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* Mode header */
+struct dbg_mode_hdr {
+ u16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ u16 block_attn_offset;
+ u32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
+ u32 sts_clr_address;
+ u32 mask_address;
+};
+
+/* Attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
+};
+
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
+ u8 has_latency_events;
+ u16 names_offset;
+};
+
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
+/* Block Debug line data */
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
+/* Condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* Memory data for registers dump */
+struct dbg_dump_mem {
+ u32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ u32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+};
+
+/* Register data for registers dump */
+struct dbg_dump_reg {
+ u32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* Split header for registers dump */
+struct dbg_dump_split_hdr {
+ u32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* Condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ u32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ u16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ u32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ u16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ u16 rule_id; /* Failing rule index */
+ u16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ u16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ u16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ u16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ u16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
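
reg_offset and imm_offset index into flat arrays that accompany the rules in the debug binary data. A hedged sketch of how a parser would locate one rule's registers and immediates (reg_array and imm_array are hypothetical names for those arrays):

    /* Editorial sketch: slice out this rule's condition registers and
     * immediates. reg_offset counts dbg_idle_chk_reg entries, imm_offset
     * counts dwords.
     */
    const union dbg_idle_chk_reg *regs = &reg_array[rule->reg_offset];
    const u32 *imms = &imm_array[rule->imm_offset];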
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ u32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* Idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
+ u8 line_num;
+ u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
+};
+
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus constraint operation types */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/* Debug Bus trigger state data */
+struct dbg_bus_trigger_state_data {
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ u32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't-care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 enabled;
+ u8 mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ u32 cid;
+};
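
eid_range_not_mask selects which member of the eid_filter_params union is meaningful. A hedged sketch of the interpretation (match semantics inferred from the field comments above):

    /* Editorial sketch: apply the Storm event-ID filter to one event ID. */
    bool match = true;

    if (storm->eid_filter_en) {
            if (storm->eid_range_not_mask) {
                    /* range filter: min <= eid <= max */
                    match = eid >= storm->eid_filter_params.range.min &&
                            eid <= storm->eid_filter_params.range.max;
            } else {
                    /* mask filter: 1s in the mask are don't-care bits */
                    u8 care = ~storm->eid_filter_params.mask.mask;

                    match = (eid & care) ==
                            (storm->eid_filter_params.mask.val & care);
            }
    }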
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ u32 app_version;
+ u8 state;
+ u8 mode_256b_en;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
+ u8 trigger_en;
+ u8 filter_constraint_dword_mask;
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
+ u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[132];
+ struct dbg_bus_storm_data storms[6];
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug Bus Storm modes */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG,
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ u8 params_initialized;
+ u8 reserved1;
+ u16 reserved2;
+ u32 param_val[48];
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_DUMP_DORQ,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_RESERVD1,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ DBG_GRC_PARAM_CRASH,
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_RESERVED0,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_RESERVED1,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
+ DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
+ MAX_DBG_STATUS
+};
+
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ u32 buf_size;
+ u8 buf_size_set;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+struct pretend_params {
+ u8 split_type;
+ u8 reserved;
+ u16 split_id;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[132];
+ u8 chip_id;
+ u8 hw_type;
+ u8 num_ports;
+ u8 num_pfs_per_port;
+ u8 num_vfs;
+ u8 initialized;
+ u8 use_dmae;
+ u8 reserved;
+ struct pretend_params pretend;
+ u32 num_regs_read;
+};
+
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: A pointer to the binary data with debug arrays.
+ *
+ * Return: enum dbg_status.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_read_regs(): Reads registers into a buffer (using GRC).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf: Destination buffer.
+ * @addr: Source GRC address in dwords.
+ * @len: Number of registers to read.
+ *
+ * Return: Void.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+/**
+ * qed_read_fw_info(): Reads FW info from the chip.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_info: (OUT) a pointer to write the FW info into.
+ *
+ * Return: True if the FW info was read successfully from one of the Storms,
+ * or false if all Storms are in reset.
+ *
+ * The FW info contains FW-related information, such as the FW version,
+ * FW image (main/L2B/kuku), FW timestamp, etc.
+ * The FW info is read from the internal RAM of the first Storm that is not in
+ * reset.
+ */
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info);
+/**
+ * qed_dbg_grc_config(): Sets the value of a GRC parameter.
+ *
+ * @p_hwfn: HW device data.
+ * @grc_param: GRC parameter.
+ * @val: Value to set.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - Grc_param is invalid.
+ * - Val is outside the allowed boundaries.
+ */
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val);
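
A hedged usage sketch: exclude everything, then re-enable only register dumping before collecting a GRC dump (treating 1 as "enabled" is an assumption about the parameter value semantics):

    /* Editorial sketch: narrow the GRC dump to registers only. */
    enum dbg_status rc;

    rc = qed_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_EXCLUDE_ALL, 1);
    if (rc == DBG_STATUS_OK)
            rc = qed_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_DUMP_REGS, 1);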
+
+/**
+ * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their
+ * default value.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
+ * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the collected GRC data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified dump buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
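
Every dump entry point below follows the same two-call pattern: query the required size, allocate, then dump. A minimal sketch for the GRC dump (the same shape applies to the idle check, MCP trace, FIFO and FW-asserts dumps); vzalloc() from <linux/vmalloc.h> is assumed here since GRC dumps can be large:

    /* Editorial sketch: the common size-query/allocate/dump sequence. */
    u32 size_dw, dumped_dw;
    enum dbg_status rc;
    u32 *buf;

    rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw);
    if (rc != DBG_STATUS_OK)
            return rc;

    buf = vzalloc((size_t)size_dw * sizeof(u32));   /* size is in dwords */
    if (!buf)
            return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

    rc = qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw);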
+
+/**
+ * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size
+ * for idle check results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_idle_chk_dump(): Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the idle check data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The trace data in MCP scratchpad contains an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the mcp trace data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - The trace data in MCP scratchpad contains an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * - The trace meta data cannot be read (from NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for grc trace fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the reg fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the IGU fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * - The specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_protection_override_get_dump_buf_size(): Returns the required
+ * buffer size for protection override window results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for protection
+ * override data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_protection_override_dump(): Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the protection override data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the FW Asserts data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_read_attn(): Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @block: Block ID.
+ * @attn_type: Attention type.
+ * @clear_status: Indicates if the attention status should be cleared.
+ * @results: (OUT) Pointer to write the read results into.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
+/**
+ * qed_dbg_print_attn(): Prints attention registers values in the
+ * specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
+ char *format_str;
+};
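
Note the convention flip relative to the structures earlier in this file: these MASK values are pre-shifted into position (the suffix is OFFSET rather than SHIFT), so extraction masks first and shifts second. A minimal sketch:

    /* Editorial sketch: mcp_trace_format fields are extracted as
     * (data & MASK) >> OFFSET, the opposite order of GET_FIELD().
     */
    u32 module_idx = (fmt->data & MCP_TRACE_FORMAT_MODULE_MASK) >>
                     MCP_TRACE_FORMAT_MODULE_OFFSET;
    u32 level = (fmt->data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
                MCP_TRACE_FORMAT_LEVEL_OFFSET;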
+
+/* MCP Trace Meta data structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_NAME_LEN 16
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: a pointer to the binary data with debug arrays.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_dbg_alloc_user_data(): Allocates user debug data.
+ *
+ * @p_hwfn: HW device data.
+ * @user_data_ptr: (OUT) a pointer to the allocated memory.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
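
A hedged sketch of the user-side initialization order implied by the two declarations above (bin_ptr is the same debug-arrays binary passed to the non-user variant):

    /* Editorial sketch: user-side debug initialization. */
    void *user_data;
    enum dbg_status rc;

    rc = qed_dbg_user_set_bin_ptr(p_hwfn, bin_ptr);
    if (rc == DBG_STATUS_OK)
            rc = qed_dbg_alloc_user_data(p_hwfn, &user_data);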
+
+/**
+ * qed_dbg_get_status_str(): Returns a string for the specified status.
+ *
+ * @status: A debug status code.
+ *
+ * Return: A string for the specified status.
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+
+/**
+ * qed_get_idle_chk_results_buf_size(): Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * qed_print_idle_chk_results(): Prints idle check results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the idle check results.
+ * @num_errors: (OUT) number of errors found in idle check.
+ * @num_warnings: (OUT) number of warnings found in idle check.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
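
A hedged sketch of turning a previously collected idle-check dump into text (dump_buf and num_dw are assumed to come from qed_dbg_idle_chk_dump(); note the parsed-results size is in bytes, not dwords):

    /* Editorial sketch: size, allocate and print parsed idle-check results. */
    u32 res_size, errors, warnings;
    enum dbg_status rc;
    char *text;

    rc = qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf, num_dw,
                                           &res_size);
    if (rc != DBG_STATUS_OK)
            return rc;

    text = vzalloc(res_size);
    if (!text)
            return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

    rc = qed_print_idle_chk_results(p_hwfn, dump_buf, num_dw, text,
                                    &errors, &warnings);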
+
+/**
+ * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @meta_buf: Meta buffer.
+ *
+ * Return: Void.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ */
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf);
+
+/**
+ * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP Trace dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_mcp_trace_results(): Prints MCP Trace results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
+ * keeps the MCP trace meta data allocated, to support continuous MCP Trace
+ * parsing. After the continuous parsing ends, qed_mcp_trace_free_meta_data()
+ * should be called to free the meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_bytes: Number of bytes that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf);
+
+/**
+ * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
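
A hedged sketch of the continuous-parsing flow these three functions describe: supply the meta buffer once, parse as many trace dumps as needed, then free the meta data:

    /* Editorial sketch: continuous MCP Trace parsing. */
    qed_dbg_mcp_trace_set_meta_data(p_hwfn, meta_buf);

    rc = qed_print_mcp_trace_results_cont(p_hwfn, dump_buf, results_buf);
    /* ... parse further dump buffers as they arrive ... */

    qed_mcp_trace_free_meta_data(p_hwfn);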
+
+/**
+ * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_reg_fifo_results(): Prints reg fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the reg fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_igu_fifo_results(): Prints IGU fifo results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the IGU fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_protection_override_results_buf_size(): Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_protection_override_results(): Prints protection override
+ * results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the reg fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_fw_asserts_results(): Prints FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer, starting from the header.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the FW Asserts results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_dbg_parse_attn(): Parses and prints attention registers values in
+ * the specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e1798925b444..ea839e605577 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -84,16 +84,17 @@ struct qed_dcbx_mib_meta_data {
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#ifdef CONFIG_DCB
-int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params);
-int qed_dcbx_config_params(struct qed_hwfn *,
- struct qed_ptt *, struct qed_dcbx_set *, bool);
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit);
#endif
/* QED local interface routines */
int
-qed_dcbx_mib_update_event(struct qed_hwfn *,
- struct qed_ptt *, enum qed_mib_read_type);
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type);
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 6ab3e60d4928..e3edca187ddf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/module.h>
@@ -10,6 +10,7 @@
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -121,6 +122,11 @@ static u32 cond0(const u32 *r, const u32 *imm)
return (r[0] & ~r[1]) != imm[0];
}
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] | imm[0]) != imm[1];
+}
+
static u32 cond1(const u32 *r, const u32 *imm)
{
return r[0] != imm[0];
@@ -172,6 +178,7 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond11,
cond12,
cond13,
+ cond14,
};
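
cond_arr maps a rule's cond_id (see struct dbg_idle_chk_rule) to its comparison callback. A hedged sketch of the dispatch, with regs and imms standing in for the sampled register values and the rule's immediates:

    /* Editorial sketch: a non-zero return means the idle-check rule fired. */
    u32 fired = cond_arr[rule->cond_id](regs, imms);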
#define NUM_PHYS_BLOCKS 84
@@ -208,10 +215,61 @@ enum dbg_bus_frame_modes {
DBG_BUS_NUM_FRAME_MODES
};
+/* Debug bus SEMI frame modes */
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */
+ DBG_BUS_SEMI_NUM_FRAME_MODES
+};
+
+/* Debug bus filter types */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */
+ DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */
+ DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */
+ DBG_BUS_FILTER_TYPE_ON /* Filter always on */
+};
+
+/* Debug bus pre-trigger recording types */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */
+};
+
+/* Debug bus post-trigger recording types */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */
+ DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */
+};
+
+/* Debug bus other engine mode */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
+};
+
+/* DBG block Framing mode definitions */
+struct framing_mode_defs {
+ u8 id;
+ u8 blocks_dword_mask;
+ u8 storms_dword_mask;
+ u8 semi_framing_mode_id;
+ u8 full_buf_thr;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
+ u8 dwords_per_cycle;
+ u8 num_framing_modes;
u32 num_ilt_pages;
+ struct framing_mode_defs *framing_modes;
};
/* HW type constant definitions */
@@ -334,7 +392,7 @@ struct split_type_defs {
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
- (int)(FIELD_BIT_OFFSET(type, field) / 32)
+ ((int)(FIELD_BIT_OFFSET(type, field) / 32))
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
@@ -431,11 +489,13 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 9
+#define NUM_COMMON_GLOBAL_PARAMS 11
#define MAX_RECURSION_DEPTH 10
+#define FW_IMG_KUKU 0
#define FW_IMG_MAIN 1
+#define FW_IMG_L2B 2
#define REG_FIFO_ELEMENT_DWORDS 2
#define REG_FIFO_DEPTH_ELEMENTS 32
@@ -464,10 +524,25 @@ struct split_type_defs {
/***************************** Constant Arrays *******************************/
+/* DBG block framing mode definitions, in descending preference order */
+static struct framing_mode_defs s_framing_mode_defs[4] = {
+ {DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
+ DBG_BUS_SEMI_FRAME_MODE_4FAST,
+ 10},
+ {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
+ 10},
+ {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
+ {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
+};
+
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
- {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
+ {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
+ s_framing_mode_defs},
+ {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
+ s_framing_mode_defs}
};
/* Storm constant definitions array */
@@ -477,8 +552,8 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
true,
TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
TCM_REG_CTX_RBC_ACCS,
{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
@@ -491,10 +566,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
false,
MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE_BB_K2,
- MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- MSEM_REG_SLOW_DBG_MODE_BB_K2,
- MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_DBG_FRAME_MODE,
+ MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE,
+ MSEM_REG_DBG_MODE1_CFG,
MSEM_REG_SYNC_DBG_EMPTY,
MSEM_REG_DBG_GPRE_VECT,
MCM_REG_CTX_RBC_ACCS,
@@ -508,10 +583,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
false,
USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE_BB_K2,
- USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- USEM_REG_SLOW_DBG_MODE_BB_K2,
- USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_DBG_FRAME_MODE,
+ USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE,
+ USEM_REG_DBG_MODE1_CFG,
USEM_REG_SYNC_DBG_EMPTY,
USEM_REG_DBG_GPRE_VECT,
UCM_REG_CTX_RBC_ACCS,
@@ -525,10 +600,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
false,
XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE_BB_K2,
- XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- XSEM_REG_SLOW_DBG_MODE_BB_K2,
- XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_DBG_FRAME_MODE,
+ XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE,
+ XSEM_REG_DBG_MODE1_CFG,
XSEM_REG_SYNC_DBG_EMPTY,
XSEM_REG_DBG_GPRE_VECT,
XCM_REG_CTX_RBC_ACCS,
@@ -541,10 +616,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
false,
YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE_BB_K2,
- YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- YSEM_REG_SLOW_DBG_MODE_BB_K2,
- YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_DBG_FRAME_MODE,
+ YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE,
+ YSEM_REG_DBG_MODE1_CFG,
YSEM_REG_SYNC_DBG_EMPTY,
YSEM_REG_DBG_GPRE_VECT,
YCM_REG_CTX_RBC_ACCS,
@@ -558,10 +633,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
true,
PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE_BB_K2,
- PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- PSEM_REG_SLOW_DBG_MODE_BB_K2,
- PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_DBG_FRAME_MODE,
+ PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE,
+ PSEM_REG_DBG_MODE1_CFG,
PSEM_REG_SYNC_DBG_EMPTY,
PSEM_REG_DBG_GPRE_VECT,
PCM_REG_CTX_RBC_ACCS,
@@ -575,7 +650,8 @@ static struct hw_type_defs s_hw_type_defs[] = {
{"asic", 1, 256, 32768},
{"reserved", 0, 0, 0},
{"reserved2", 0, 0, 0},
- {"reserved3", 0, 0, 0}
+ {"reserved3", 0, 0, 0},
+ {"reserved4", 0, 0, 0}
};
static struct grc_param_defs s_grc_param_defs[] = {
@@ -772,25 +848,25 @@ static struct rbc_reset_defs s_rbc_reset_defs[] = {
static struct phy_defs s_phy_defs[] = {
{"nw_phy", NWS_REG_NWS_CMU_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
- {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
static struct split_type_defs s_split_type_defs[] = {
@@ -810,8 +886,17 @@ static struct split_type_defs s_split_type_defs[] = {
{"vf"}
};
+/******************************** Variables **********************************/
+
+/* The version of the calling app */
+static u32 s_app_ver;
+
/**************************** Private Functions ******************************/
+static void qed_static_asserts(void)
+{
+}
+
/* Reads and returns a single dword from the specified unaligned buffer */
static u32 qed_read_unaligned_dword(u8 *buf)
{
@@ -870,6 +955,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
if (dev_data->initialized)
return DBG_STATUS_OK;
+ if (!s_app_ver)
+ return DBG_STATUS_APP_VERSION_NOT_SET;
+
/* Set chip */
if (QED_IS_K2(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_K2;
@@ -990,11 +1078,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
- /* qed_rq() fetches data in CPU byteorder. Swap it back to
- * the device's to get right structure layout.
- */
- cpu_to_le32_array(dest, size);
-
/* Read FW version info from Storm RAM */
size = le32_to_cpu(fw_info_location.size);
if (!size || size > sizeof(*fw_info))
@@ -1006,8 +1089,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
-
- cpu_to_le32_array(dest, size);
}
/* Dumps the specified string to the specified buffer.
@@ -1117,9 +1198,15 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid FW version string\n");
switch (fw_info.ver.image_id) {
+ case FW_IMG_KUKU:
+ strcpy(fw_img_str, "kuku");
+ break;
case FW_IMG_MAIN:
strcpy(fw_img_str, "main");
break;
+ case FW_IMG_L2B:
+ strcpy(fw_img_str, "l2b");
+ break;
default:
strcpy(fw_img_str, "unknown");
break;
@@ -1255,6 +1342,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
s_hw_type_defs[dev_data->hw_type].name);
offset += qed_dump_num_param(dump_buf + offset,
dump, "pci-func", p_hwfn->abs_pf_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "epoch", qed_get_epoch_time());
if (dev_data->chip_id == CHIP_BB)
offset += qed_dump_num_param(dump_buf + offset,
dump, "path", QED_PATH_ID(p_hwfn));
@@ -1590,7 +1679,7 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
continue;
reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STALL_0_BB_K2;
+ SEM_FAST_REG_STALL_0;
qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
}
@@ -1703,8 +1792,8 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
const struct dbg_attn_reg *attn_reg_arr;
+ u32 block_id, sts_clr_address;
u8 reg_idx, num_attn_regs;
- u32 block_id;
for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id])
@@ -1728,16 +1817,103 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
+ sts_clr_address = reg_data->sts_clr_address;
/* If Mode match: clear parity status */
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset))
qed_rd(p_hwfn, p_ptt,
- DWORDS_TO_BYTES(reg_data->
- sts_clr_address));
+ DWORDS_TO_BYTES(sts_clr_address));
}
}
}
+/* Finds the meta data image in NVRAM */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+ int nvm_result;
+
+ /* Call NVRAM get file command */
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att, false);
+
+ /* Check response */
+ if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
+ return DBG_STATUS_OK;
+}
+
+/* Reads data from NVRAM */
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
+ s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0, param = 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
+ (u32 *)((u8 *)ret_buf + read_offset),
+ false))
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
+
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* the following parameters are dumped:
* - count: no. of dumped entries
@@ -3189,17 +3365,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
return offset;
}
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes);
-
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf);
-
/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3283,10 +3448,6 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
has_dbg_bus = GET_FIELD(block_per_chip->flags,
DBG_BLOCK_CHIP_HAS_DBG_BUS);
- /* read+clear for NWS parity is not working, skip NWS block */
- if (block_id == BLOCK_NWS)
- continue;
-
if (!is_removed && has_dbg_bus &&
GET_FIELD(block_per_chip->dbg_bus_mode.data,
DBG_MODE_HDR_EVAL_MODE) > 0) {
@@ -3375,8 +3536,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 dwords_read, offset = 0;
bool parities_masked = false;
+ u32 dwords_read, offset = 0;
u8 i;
*num_dumped_dwords = 0;
@@ -3545,8 +3706,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
*/
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 *
- dump_buf,
+ u32 *dump_buf,
bool dump,
u16 rule_id,
const struct dbg_idle_chk_rule *rule,
@@ -3894,91 +4054,6 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Finds the meta data image in NVRAM */
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
- struct mcp_file_att file_att;
- int nvm_result;
-
- /* Call NVRAM get file command */
- nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
- p_ptt,
- DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type,
- &ret_mcp_resp,
- &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att);
-
- /* Check response */
- if (nvm_result ||
- (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
-
- /* Update return values */
- *nvram_offset_bytes = file_att.nvm_start_addr;
- *nvram_size_bytes = file_att.len;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
- image_type, *nvram_offset_bytes, *nvram_size_bytes);
-
- /* Check alignment */
- if (*nvram_size_bytes & 0x3)
- return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
-
- return DBG_STATUS_OK;
-}
-
-/* Reads data from NVRAM */
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
- s32 bytes_left = nvram_size_bytes;
- u32 read_offset = 0, param = 0;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "nvram_read: reading image of size %d bytes from NVRAM\n",
- nvram_size_bytes);
-
- do {
- bytes_to_copy =
- (bytes_left >
- MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
-
- /* Call NVRAM read command */
- SET_MFW_FIELD(param,
- DRV_MB_PARAM_NVM_OFFSET,
- nvram_offset_bytes + read_offset);
- SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NVM_READ_NVRAM, param,
- &ret_mcp_resp,
- &ret_mcp_param, &ret_read_size,
- (u32 *)((u8 *)ret_buf + read_offset)))
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Update read offset */
- read_offset += ret_read_size;
- bytes_left -= ret_read_size;
- } while (bytes_left > 0);
-
- return DBG_STATUS_OK;
-}
-
/* Get info on the MCP Trace data in the scratchpad:
* - trace_data_grc_addr (OUT): trace data GRC address in bytes
* - trace_data_size (OUT): trace data size in bytes (without the header)
@@ -4480,14 +4555,18 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Dumps the specified ILT pages to the specified buffer.
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
- bool dump,
- u32 start_page_id,
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
+ bool *dump, u32 start_page_id,
u32 num_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids, u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
- u32 page_id, end_page_id, offset = 0;
+ u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
+ u32 page_id, end_page_id, offset = *given_offset;
+ struct phys_mem_desc *mem_desc = NULL;
+ bool continue_dump = *dump;
+ u32 partial_page_size = 0;
if (num_pages == 0)
return offset;
@@ -4495,31 +4574,51 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
end_page_id = start_page_id + num_pages - 1;
for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
- struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
-
- /**
- *
- * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
- * break;
- */
-
+ mem_desc = &ilt_pages[page_id];
if (!ilt_pages[page_id].virt_addr)
continue;
if (dump_page_ids) {
- /* Copy page ID to dump buffer */
- if (dump)
+ /* Copy page ID to dump buffer
+ * (if dump is needed and buffer is not full)
+ */
+ if ((continue_dump) &&
+ (offset + 1 > buf_size_in_dwords)) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ if (continue_dump)
*(dump_buf + offset) = page_id;
offset++;
} else {
/* Copy page memory to dump buffer */
- if (dump)
+ if ((continue_dump) &&
+ (offset + BYTES_TO_DWORDS(mem_desc->size) >
+ buf_size_in_dwords)) {
+ if (offset + BYTES_TO_DWORDS(mem_desc->size) >
+ buf_size_in_dwords) {
+ partial_page_size =
+ buf_size_in_dwords - offset;
+ memcpy(dump_buf + offset,
+ mem_desc->virt_addr,
+ partial_page_size);
+ continue_dump = false;
+ actual_dump_size_in_dwords =
+ offset + partial_page_size;
+ }
+ }
+
+ if (continue_dump)
memcpy(dump_buf + offset,
mem_desc->virt_addr, mem_desc->size);
offset += BYTES_TO_DWORDS(mem_desc->size);
}
}
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
+
return offset;
}
@@ -4528,21 +4627,30 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
*/
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- bool dump,
+ u32 *given_offset,
+ bool *dump,
u32 valid_conn_pf_pages,
u32 valid_conn_vf_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids,
+ u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 pf_start_line, start_page_id, offset = 0;
+ u32 pf_start_line, start_page_id, offset = *given_offset;
u32 cdut_pf_init_pages, cdut_vf_init_pages;
u32 cdut_pf_work_pages, cdut_vf_work_pages;
u32 base_data_offset, size_param_offset;
+ u32 src_pages;
+ u32 section_header_and_param_size;
u32 cdut_pf_pages, cdut_vf_pages;
+ u32 actual_dump_size_in_dwords;
+ bool continue_dump = *dump;
+ bool update_size = *dump;
const char *section_name;
- u8 i;
+ u32 i;
+ actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
@@ -4551,13 +4659,26 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+ section_header_and_param_size = qed_dump_section_hdr(NULL,
+ false,
+ section_name,
+ 1) +
+ qed_dump_num_param(NULL, false, "size", 0);
+
+ if ((continue_dump) &&
+ (offset + section_header_and_param_size > buf_size_in_dwords)) {
+ continue_dump = false;
+ update_size = false;
+ actual_dump_size_in_dwords = offset;
+ }
- offset +=
- qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ continue_dump, section_name, 1);
/* Dump size parameter (0 for now, overwritten with real size later) */
size_param_offset = offset;
- offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ continue_dump, "size", 0);
base_data_offset = offset;
/* CDUC pages are ordered as follows:
@@ -4570,22 +4691,22 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
/* Dump connection PF pages */
start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_pf_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, valid_conn_pf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump connection VF pages */
start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_vf_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* CDUT pages are ordered as follows:
@@ -4599,63 +4720,84 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
/* Dump task PF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_init_pages - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_pf_work_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, cdut_pf_work_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump task VF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += cdut_vf_pages)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_vf_work_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ cdut_vf_work_pages, ilt_pages,
+ dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+ }
+
+	/* Dump Searcher pages */
+ if (clients[ILT_CLI_SRC].active) {
+ start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
+ src_pages = clients[ILT_CLI_SRC].last.val -
+ clients[ILT_CLI_SRC].first.val + 1;
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, src_pages, ilt_pages,
+ dump_page_ids, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* Overwrite size param */
- if (dump)
- qed_dump_num_param(dump_buf + size_param_offset,
- dump, "size", offset - base_data_offset);
+ if (update_size) {
+ u32 section_size = (*dump == continue_dump) ?
+ offset - base_data_offset :
+ actual_dump_size_in_dwords - base_data_offset;
+ if (section_size > 0)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ *dump, "size", section_size);
+ else if ((section_size == 0) && (*dump != continue_dump))
+ actual_dump_size_in_dwords -=
+ section_header_and_param_size;
+ }
+
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
return offset;
}
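
qed_ilt_dump_pages_section() above writes the section's "size" parameter as 0, dumps the body, and only afterwards patches the real size back in at size_param_offset. A small sketch of that placeholder-then-patch idiom (dump_demo_section() and the literal values are invented):

    #include <stdint.h>

    /* Emit header + zero "size" slot, dump the body, then patch the slot
     * with the real body size once it is known.
     */
    static uint32_t dump_demo_section(uint32_t *buf)
    {
            uint32_t offset = 0, size_slot, base;

            buf[offset++] = 0x5ec0de;       /* stand-in section header */
            size_slot = offset;
            buf[offset++] = 0;              /* "size" placeholder */
            base = offset;

            buf[offset++] = 0x11111111;     /* section body ... */
            buf[offset++] = 0x22222222;

            buf[size_slot] = offset - base; /* overwrite with real size */
            return offset;
    }
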
-/* Performs ILT Dump to the specified buffer.
+/* Dumps a section containing the global parameters.
+ * Part of the ILT dump process.
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+static u32
+qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 cduc_page_size,
+ u32 conn_ctx_size,
+ u32 cdut_page_size,
+ u32 *full_dump_size_param_offset,
+ u32 *actual_dump_size_param_offset)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
- u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
- u32 num_cids_per_page, conn_ctx_size;
- u32 cduc_page_size, cdut_page_size;
- struct phys_mem_desc *ilt_pages;
- u8 conn_type;
-
- cduc_page_size = 1 <<
- (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- cdut_page_size = 1 <<
- (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
- num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
- ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 offset = 0;
- /* Dump global params - 22 must match number of params below */
offset += qed_dump_common_global_params(p_hwfn, p_ptt,
- dump_buf + offset, dump, 22);
+ dump_buf + offset,
+ dump, 30);
offset += qed_dump_str_param(dump_buf + offset,
- dump, "dump-type", "ilt-dump");
+ dump,
+ "dump-type", "ilt-dump");
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cduc-page-size", cduc_page_size);
+ "cduc-page-size",
+ cduc_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-first-page-id",
@@ -4667,20 +4809,19 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-pf-pages",
- clients
- [ILT_CLI_CDUC].pf_total_lines);
+ clients[ILT_CLI_CDUC].pf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-vf-pages",
- clients
- [ILT_CLI_CDUC].vf_total_lines);
+ clients[ILT_CLI_CDUC].vf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"max-conn-ctx-size",
conn_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cdut-page-size", cdut_page_size);
+ "cdut-page-size",
+ cdut_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cdut-first-page-id",
@@ -4711,19 +4852,16 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
p_hwfn->p_cxt_mngr->task_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "task-type-id",
- p_hwfn->p_cxt_mngr->task_type_id);
- offset += qed_dump_num_param(dump_buf + offset,
- dump,
"first-vf-id-in-pf",
p_hwfn->p_cxt_mngr->first_vf_in_pf);
- offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
- dump,
- "num-vfs-in-pf",
- p_hwfn->p_cxt_mngr->vf_count);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "ptr-size-bytes", sizeof(void *));
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes",
+ sizeof(void *));
offset += qed_dump_num_param(dump_buf + offset,
dump,
"pf-start-line",
@@ -4736,58 +4874,281 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
dump,
"ilt-shadow-size",
p_hwfn->p_cxt_mngr->ilt_shadow_size);
+
+ *full_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "dump-size-full", 0);
+
+ *actual_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "dump-size-actual", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "iscsi_task_pages",
+ p_hwfn->p_cxt_mngr->iscsi_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fcoe_task_pages",
+ p_hwfn->p_cxt_mngr->fcoe_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "roce_task_pages",
+ p_hwfn->p_cxt_mngr->roce_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "eth_task_pages",
+ p_hwfn->p_cxt_mngr->eth_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-first-page-id",
+ clients[ILT_CLI_SRC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-last-page-id",
+ clients[ILT_CLI_SRC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-is-active",
+ clients[ILT_CLI_SRC].active);
+
	/* Adding or removing parameters requires updating the count passed
	 * to qed_dump_common_global_params()
	 */
- /* Dump section containing number of PF CIDs per connection type */
+ return offset;
+}
+
+/* Dump section containing number of PF CIDs per connection type.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_pf_cids)
+{
+ u32 num_pf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "num_pf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_pf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_pf_cids =
- p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
-
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
if (dump)
*(dump_buf + offset) = num_pf_cids;
- valid_conn_pf_cids += num_pf_cids;
+ *valid_conn_pf_cids += num_pf_cids;
}
- /* Dump section containing number of VF CIDs per connection type */
- offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "num_vf_cids_per_conn_type", 1);
+ return offset;
+}
+
+/* Dump section containing number of VF CIDs per connection type.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_vf_cids)
+{
+ u32 num_vf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "num_vf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_vf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_vf_cids =
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_vf_cids =
p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
-
if (dump)
*(dump_buf + offset) = num_vf_cids;
- valid_conn_vf_cids += num_vf_cids;
+ *valid_conn_vf_cids += num_vf_cids;
+ }
+
+ return offset;
+}
+
+/* Performs ILT Dump to the specified buffer.
+ * buf_size_in_dwords - The dumped buffer size.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
+{
+#if ((!defined VMWARE) && (!defined UEFI))
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+#endif
+ u32 valid_conn_vf_cids = 0,
+ valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
+ u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ u32 actual_dump_size_in_dwords = 0;
+ struct phys_mem_desc *ilt_pages;
+	u32 actual_dump_off = 0;
+ u32 last_section_size;
+ u32 full_dump_off = 0;
+ u32 section_size = 0;
+ bool continue_dump;
+ u32 page_id;
+
+ last_section_size = qed_dump_last_section(NULL, 0, false);
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ continue_dump = dump;
+
+	/* If dumping, reserve room for the last section, which holds the
+	 * CRC of the dumped data.
+	 */
+ if (dump) {
+ if (buf_size_in_dwords >= last_section_size) {
+ buf_size_in_dwords -= last_section_size;
+ } else {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
}
- /* Dump section containing physical memory descs for each ILT page */
+ /* Dump global params */
+
+	/* If dumping, first calculate the size of this section without
+	 * dumping it, and check that there is enough room for it in the
+	 * dump buffer. If there is not enough room, stop dumping.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ NULL,
+ false,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+							   &actual_dump_off);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ continue_dump,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+							 &actual_dump_off);
+
+	/* Dump section containing number of PF CIDs per connection type.
+	 * If dumping, first check that the dump buffer has enough room
+	 * for this section.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_pf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_pf_cids);
+
+	/* Dump section containing number of VF CIDs per connection type.
+	 * If dumping, first check that the dump buffer has enough room
+	 * for this section.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_vf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_vf_cids);
+
+ /* Dump section containing physical memory descriptors for each
+ * ILT page.
+ */
num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+
+	/* If dumping, first check that the dump buffer has enough room
+	 * for the section header.
+	 */
+ if (continue_dump) {
+ section_size = qed_dump_section_hdr(NULL,
+ false,
+ "ilt_page_desc",
+ 1) +
+ qed_dump_num_param(NULL,
+ false,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "ilt_page_desc", 1);
+ continue_dump, "ilt_page_desc", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump,
+ continue_dump,
"size",
num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
- /* Copy memory descriptors to dump buffer */
- if (dump) {
- u32 page_id;
-
+	/* Copy memory descriptors to the dump buffer. When dumping, stop
+	 * once the dump buffer is full.
+	 */
+ if (continue_dump) {
for (page_id = 0; page_id < num_pages;
- page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
- memcpy(dump_buf + offset,
- &ilt_pages[page_id],
- DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
+ if (continue_dump &&
+ (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
+ buf_size_in_dwords)) {
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES
+ (PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ if (continue_dump) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+ }
} else {
offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
}
@@ -4798,25 +5159,31 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
num_cids_per_page);
/* Dump ILT pages IDs */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, true);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, true, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump ILT pages memory */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, false);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, false, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ real_dumped_size =
+ (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
+ qed_dump_num_param(dump_buf + full_dump_off, dump,
+ "full-dump-size", offset + last_section_size);
+	qed_dump_num_param(dump_buf + actual_dump_off,
+ dump,
+ "actual-dump-size",
+ real_dumped_size + last_section_size);
/* Dump last section */
- offset += qed_dump_last_section(dump_buf, offset, dump);
+ real_dumped_size += qed_dump_last_section(dump_buf,
+ real_dumped_size, dump);
- return offset;
+ return real_dumped_size;
}
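
qed_dbg_ilt_get_dump_buf_size() below sizes the buffer by calling qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false), relying on every dump helper advancing offset without touching memory when dump is false. A standalone sketch of that two-pass dry-run idiom (all names invented, not driver API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Measures when dump is false, writes when dump is true; the offset
     * arithmetic is identical in both passes.
     */
    static uint32_t dump_feature(uint32_t *buf, bool dump)
    {
            uint32_t offset = 0;

            if (dump)
                    buf[offset] = 0x12345678;
            offset++;
            return offset;
    }

    static int collect_feature(uint32_t **out, uint32_t *out_dwords)
    {
            uint32_t size = dump_feature(NULL, false); /* pass 1: measure */
            uint32_t *buf = malloc(size * sizeof(*buf));

            if (!buf)
                    return -1;
            *out_dwords = dump_feature(buf, true);     /* pass 2: dump */
            *out = buf;
            return 0;
    }
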
/***************************** Public Functions *******************************/
@@ -4837,6 +5204,16 @@ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
+static enum dbg_status qed_dbg_set_app_ver(u32 ver)
+{
+ if (ver < TOOLS_VERSION)
+ return DBG_STATUS_UNSUPPORTED_APP_VERSION;
+
+ s_app_ver = ver;
+
+ return DBG_STATUS_OK;
+}
+
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info)
{
@@ -4975,6 +5352,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+	/* Doesn't do anything, needed for compile-time asserts */
+ qed_static_asserts();
+
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
@@ -5296,7 +5676,7 @@ static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
if (status != DBG_STATUS_OK)
return status;
- *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
return DBG_STATUS_OK;
}
@@ -5307,21 +5687,9 @@ static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
- enum dbg_status status;
-
- *num_dumped_dwords = 0;
-
- status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
- p_ptt,
- &needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
- return status;
-
- if (buf_size_in_dwords < needed_buf_size_in_dwords)
- return DBG_STATUS_DUMP_BUF_TOO_SMALL;
-
- *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn,
+ p_ptt,
+ dump_buf, buf_size_in_dwords, true);
	/* Revert GRC params to their defaults */
qed_dbg_grc_set_params_default(p_hwfn);
@@ -5724,7 +6092,46 @@ static const char * const s_status_str[] = {
"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
- "When triggering on Storm data, the Storm to trigger on must be specified"
+ "When triggering on Storm data, the Storm to trigger on must be specified",
+
+ /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
+ "Failed to request MDUMP2 Offsize",
+
+ /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
+ "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",
+
+ /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
+ "Invalid Signature found at start of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
+ "Invalid Log Size of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
+ "Invalid Log Header of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
+ "Invalid Log Data of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
+ "Could not extract number of ports from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
+ "Could not extract MFW (link) status from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
+ "Could not display linkdump of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
+ "Could not read PHY CFG of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
+ "Could not read PLL Mode of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
+ "Could not read TSCF/TSCE Lane Regs of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
+ "Could not allocate MDUMP2 reg-val internal buffer"
};
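
The extended s_status_str table above is a parallel array indexed by the enum dbg_status values, with each comment naming the enum entry its string belongs to; lookup is a plain array access. A tiny sketch of the idiom with invented names:

    enum demo_status { DEMO_OK, DEMO_BUF_TOO_SMALL, DEMO_NUM_STATUS };

    static const char * const demo_status_str[] = {
            /* DEMO_OK */            "Operation completed successfully",
            /* DEMO_BUF_TOO_SMALL */ "Dump buffer is too small",
    };

    static const char *demo_status_to_str(enum demo_status st)
    {
            /* Guard against values added to the enum but not the table */
            return st < DEMO_NUM_STATUS ? demo_status_str[st] : "Unknown";
    }
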
/* Idle check severity names array */
@@ -5874,6 +6281,10 @@ static char s_temp_buf[MAX_MSG_LEN];
/**************************** Private Functions ******************************/
+static void qed_user_static_asserts(void)
+{
+}
+
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
return (a + b) % size;
@@ -6153,9 +6564,8 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
/* Skip register names until the required reg_id is
* reached.
*/
- for (; reg_id > curr_reg_id;
- curr_reg_id++,
- parsing_str += strlen(parsing_str) + 1);
+ for (; reg_id > curr_reg_id; curr_reg_id++)
+ parsing_str += strlen(parsing_str) + 1;
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
@@ -6208,9 +6618,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 *num_errors,
u32 *num_warnings)
{
+ u32 num_section_params = 0, num_rules, num_rules_not_dumped;
const char *section_name, *param_name, *param_str_val;
u32 *dump_buf_end = dump_buf + num_dumped_dwords;
- u32 num_section_params = 0, num_rules;
/* Offset in results_buf in bytes */
u32 results_offset = 0;
@@ -6234,15 +6644,31 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
num_section_params,
results_buf, &results_offset);
- /* Read idle_chk section */
+ /* Read idle_chk section
+ * There may be 1 or 2 idle_chk section parameters:
+ * - 1st is "num_rules"
+ * - 2nd is "num_rules_not_dumped" (optional)
+ */
+
dump_buf += qed_read_section_hdr(dump_buf,
&section_name, &num_section_params);
- if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ if (strcmp(section_name, "idle_chk") ||
+ (num_section_params != 2 && num_section_params != 1))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &num_rules);
if (strcmp(param_name, "num_rules"))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ if (num_section_params > 1) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &num_rules_not_dumped);
+ if (strcmp(param_name, "num_rules_not_dumped"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ } else {
+ num_rules_not_dumped = 0;
+ }
if (num_rules) {
u32 rules_print_size;
@@ -6309,6 +6735,13 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"\nIdle Check completed successfully\n");
+ if (num_rules_not_dumped)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+				"\nIdle Check partially dumped: num_rules_not_dumped = %d\n",
+ num_rules_not_dumped);
+
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
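
The parser above accepts both the old one-parameter and the new two-parameter idle_chk section layout, defaulting num_rules_not_dumped to 0 when the parameter is absent. A sketch of that optional-trailing-parameter handling (struct param and the function are invented stand-ins for the qed reader helpers):

    #include <stdint.h>
    #include <string.h>

    struct param { const char *name; uint32_t val; };

    static int parse_idle_chk_params(const struct param *p, uint32_t n,
                                     uint32_t *num_rules,
                                     uint32_t *num_rules_not_dumped)
    {
            if (n != 1 && n != 2)
                    return -1;
            if (strcmp(p[0].name, "num_rules"))
                    return -1;
            *num_rules = p[0].val;

            if (n > 1) {
                    if (strcmp(p[1].name, "num_rules_not_dumped"))
                            return -1;
                    *num_rules_not_dumped = p[1].val;
            } else {
                    *num_rules_not_dumped = 0; /* old-format dump */
            }
            return 0;
    }
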
@@ -7160,6 +7593,9 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
+	/* Doesn't do anything, needed for compile-time asserts */
+ qed_user_static_asserts();
+
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
results_buf, &parsed_buf_size, true);
@@ -7336,7 +7772,7 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
reg_result->block_attn_offset;
/* Go over attention status bits */
- for (j = 0; j < num_reg_attn; j++, bit_idx++) {
+ for (j = 0; j < num_reg_attn; j++) {
u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
@@ -7353,35 +7789,36 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Check current bit index */
- if (!(reg_result->sts_val & BIT(bit_idx)))
- continue;
+ if (reg_result->sts_val & BIT(bit_idx)) {
+ /* An attention bit with value=1 was found
+ * Find attention name
+ */
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
+ attn_name = attn_name_base + attn_name_offset;
+ attn_type_str =
+ (attn_type ==
+ ATTN_TYPE_INTERRUPT ? "Interrupt" :
+ "Parity");
+ masked_str = reg_result->mask_val &
+ BIT(bit_idx) ?
+ " [masked]" : "";
+ sts_addr =
+ GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS);
+ DP_NOTICE(p_hwfn,
+ "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
+ block_name, attn_type_str, attn_name,
+ sts_addr * 4, bit_idx, masked_str);
+ }
- /* An attention bit with value=1 was found
- * Find attention name
- */
- attn_name_offset =
- block_attn_name_offsets[attn_idx_val];
- attn_name = attn_name_base + attn_name_offset;
- attn_type_str =
- (attn_type ==
- ATTN_TYPE_INTERRUPT ? "Interrupt" :
- "Parity");
- masked_str = reg_result->mask_val & BIT(bit_idx) ?
- " [masked]" : "";
- sts_addr = GET_FIELD(reg_result->data,
- DBG_ATTN_REG_RESULT_STS_ADDRESS);
- DP_NOTICE(p_hwfn,
- "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
- block_name, attn_type_str, attn_name,
- sts_addr * 4, bit_idx, masked_str);
+ bit_idx++;
}
}
return DBG_STATUS_OK;
}
-static DEFINE_MUTEX(qed_dbg_lock);
-
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
@@ -7396,9 +7833,26 @@ qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
&num_warnnings);
}
+static DEFINE_MUTEX(qed_dbg_lock);
+
+#define MAX_PHY_RESULT_BUFFER 9000
+
+/******************************** Feature Metadata section *******************/
+
+#define GRC_NUM_STR_FUNCS 2
+#define IDLE_CHK_NUM_STR_FUNCS 1
+#define MCP_TRACE_NUM_STR_FUNCS 1
+#define REG_FIFO_NUM_STR_FUNCS 1
+#define IGU_FIFO_NUM_STR_FUNCS 1
+#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
+#define FW_ASSERTS_NUM_STR_FUNCS 1
+#define ILT_NUM_STR_FUNCS 1
+#define PHY_NUM_STR_FUNCS 20
+
/* Feature meta data lookup table */
static struct {
char *name;
+ u32 num_funcs;
enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *size);
enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
@@ -7411,40 +7865,46 @@ static struct {
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+ const struct qed_func_lookup *hsi_func_lookup;
} qed_features_lookup[] = {
{
- "grc", qed_dbg_grc_get_dump_buf_size,
- qed_dbg_grc_dump, NULL, NULL}, {
- "idle_chk",
+ "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL, NULL}, {
+ "idle_chk", IDLE_CHK_NUM_STR_FUNCS,
qed_dbg_idle_chk_get_dump_buf_size,
qed_dbg_idle_chk_dump,
qed_print_idle_chk_results_wrapper,
- qed_get_idle_chk_results_buf_size}, {
- "mcp_trace",
+ qed_get_idle_chk_results_buf_size,
+ NULL}, {
+ "mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
qed_dbg_mcp_trace_get_dump_buf_size,
qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
- qed_get_mcp_trace_results_buf_size}, {
- "reg_fifo",
+ qed_get_mcp_trace_results_buf_size,
+ NULL}, {
+ "reg_fifo", REG_FIFO_NUM_STR_FUNCS,
qed_dbg_reg_fifo_get_dump_buf_size,
qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
- qed_get_reg_fifo_results_buf_size}, {
- "igu_fifo",
+ qed_get_reg_fifo_results_buf_size,
+ NULL}, {
+ "igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
qed_dbg_igu_fifo_get_dump_buf_size,
qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
- qed_get_igu_fifo_results_buf_size}, {
- "protection_override",
+ qed_get_igu_fifo_results_buf_size,
+ NULL}, {
+ "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
qed_dbg_protection_override_get_dump_buf_size,
qed_dbg_protection_override_dump,
qed_print_protection_override_results,
- qed_get_protection_override_results_buf_size}, {
- "fw_asserts",
+ qed_get_protection_override_results_buf_size,
+ NULL}, {
+ "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
qed_dbg_fw_asserts_get_dump_buf_size,
qed_dbg_fw_asserts_dump,
qed_print_fw_asserts_results,
- qed_get_fw_asserts_results_buf_size}, {
- "ilt",
- qed_dbg_ilt_get_dump_buf_size,
- qed_dbg_ilt_dump, NULL, NULL},};
+ qed_get_fw_asserts_results_buf_size,
+ NULL}, {
+ "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL, NULL},};
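
qed_features_lookup[] bundles each feature's name, string-function count and handler pointers into one row so callers can size, dump and format any feature generically. A compilable miniature of the same table-driven dispatch (rows and handlers invented):

    #include <stdint.h>
    #include <stdio.h>

    struct feature_ops {
            const char *name;
            uint32_t num_funcs;
            int (*get_size)(uint32_t *size);
            int (*perform_dump)(uint32_t *buf, uint32_t size);
    };

    static int demo_get_size(uint32_t *size) { *size = 4; return 0; }
    static int demo_dump(uint32_t *buf, uint32_t size)
    {
            buf[0] = size;  /* stand-in payload */
            return 0;
    }

    static const struct feature_ops features[] = {
            { "demo", 1, demo_get_size, demo_dump },
    };

    int main(void)
    {
            uint32_t size, buf[16];
            unsigned int i;

            for (i = 0; i < sizeof(features) / sizeof(features[0]); i++) {
                    features[i].get_size(&size);
                    features[i].perform_dump(buf, size);
                    printf("%s: %u dwords\n", features[i].name,
                           (unsigned int)size);
            }
            return 0;
    }
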
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
@@ -7466,7 +7926,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 text_size_bytes, null_char_pos, i;
+ u32 txt_size_bytes, null_char_pos, i;
+ u32 *dbuf, dwords;
enum dbg_status rc;
char *text_buf;
@@ -7474,33 +7935,43 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
if (!qed_features_lookup[feature_idx].results_buf_size)
return DBG_STATUS_OK;
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = feature->dumped_dwords;
+
/* Obtain size of formatted output */
- rc = qed_features_lookup[feature_idx].
- results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, &text_size_bytes);
+ rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
+ dbuf,
+ dwords,
+ &txt_size_bytes);
if (rc != DBG_STATUS_OK)
return rc;
- /* Make sure that the allocated size is a multiple of dword (4 bytes) */
- null_char_pos = text_size_bytes - 1;
- text_size_bytes = (text_size_bytes + 3) & ~0x3;
+ /* Make sure that the allocated size is a multiple of dword
+ * (4 bytes).
+ */
+ null_char_pos = txt_size_bytes - 1;
+ txt_size_bytes = (txt_size_bytes + 3) & ~0x3;
- if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
DP_NOTICE(p_hwfn->cdev,
"formatted size of feature was too small %d. Aborting\n",
- text_size_bytes);
+ txt_size_bytes);
return DBG_STATUS_INVALID_ARGS;
}
- /* Allocate temp text buf */
- text_buf = vzalloc(text_size_bytes);
- if (!text_buf)
+	/* Allocate temp text buf */
+ text_buf = vzalloc(txt_size_bytes);
+ if (!text_buf) {
+ DP_NOTICE(p_hwfn->cdev,
+ "failed to allocate text buffer. Aborting\n");
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
/* Decode feature opcodes to string on temp buf */
- rc = qed_features_lookup[feature_idx].
- print_results(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, text_buf);
+ rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
+ dbuf,
+ dwords,
+ text_buf);
if (rc != DBG_STATUS_OK) {
vfree(text_buf);
return rc;
@@ -7510,26 +7981,27 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
* The bytes that were added as a result of the dword alignment are also
* padded with '\n' characters.
*/
- for (i = null_char_pos; i < text_size_bytes; i++)
+ for (i = null_char_pos; i < txt_size_bytes; i++)
text_buf[i] = '\n';
/* Dump printable feature to log */
if (p_hwfn->cdev->print_dbg_data)
- qed_dbg_print_feature(text_buf, text_size_bytes);
+ qed_dbg_print_feature(text_buf, txt_size_bytes);
- /* Just return the original binary buffer if requested */
+ /* Dump binary data as is to the output file */
if (p_hwfn->cdev->dbg_bin_dump) {
vfree(text_buf);
- return DBG_STATUS_OK;
+ return rc;
}
- /* Free the old dump_buf and point the dump_buf to the newly allocagted
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
vfree(feature->dump_buf);
feature->dump_buf = text_buf;
- feature->buf_size = text_size_bytes;
- feature->dumped_dwords = text_size_bytes / 4;
+ feature->buf_size = txt_size_bytes;
+ feature->dumped_dwords = txt_size_bytes / 4;
+
return rc;
}
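
format_feature() rounds the formatted text size up to a whole number of dwords and pads the tail, including the old NUL position, with '\n'. A sketch of just that alignment step (the caller must have allocated the rounded-up size; names invented):

    #include <stdint.h>

    /* size_bytes includes the terminating NUL; returns the padded size,
     * a multiple of 4.
     */
    static uint32_t pad_text_to_dwords(char *text, uint32_t size_bytes)
    {
            uint32_t null_char_pos = size_bytes - 1;
            uint32_t padded = (size_bytes + 3) & ~0x3u; /* round up to 4 */
            uint32_t i;

            for (i = null_char_pos; i < padded; i++)
                    text[i] = '\n';
            return padded;
    }
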
@@ -7542,7 +8014,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 buf_size_dwords;
+ u32 buf_size_dwords, *dbuf, *dwords;
enum dbg_status rc;
DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
@@ -7580,13 +8052,16 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
if (!feature->dump_buf)
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
- rc = qed_features_lookup[feature_idx].
- perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
- feature->buf_size / sizeof(u32),
- &feature->dumped_dwords);
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = &feature->dumped_dwords;
+ rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
+ dbuf,
+ feature->buf_size /
+ sizeof(u32),
+ dwords);
/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
- * In this case the buffer holds valid binary data, but we wont able
+	 * In this case the buffer holds valid binary data, but we won't be able
* to parse it (since parsing relies on data in NVRAM which is only
	 * accessible when MFW is responsive). Skip the formatting but return
* success so that binary data is provided.
@@ -7777,7 +8252,8 @@ enum debug_print_features {
static u32 qed_calc_regdump_header(struct qed_dev *cdev,
enum debug_print_features feature,
- int engine, u32 feature_size, u8 omit_engine)
+ int engine, u32 feature_size,
+ u8 omit_engine, u8 dbg_bin_dump)
{
u32 res = 0;
@@ -7788,7 +8264,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
feature, feature_size);
SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
- SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
+ SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
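
qed_calc_regdump_header() packs the size, feature id, the now-parameterized bin-dump flag, the omit-engine flag and the engine number into one 32-bit header word via SET_FIELD(). A sketch of the packing with an invented field layout (the real REGDUMP_HEADER_* widths are defined elsewhere in the driver):

    #include <stdint.h>

    #define HDR_SIZE_MASK      0xffffffu  /* bits 0..23: size in dwords */
    #define HDR_FEATURE_SHIFT  24         /* bits 24..27: feature id */
    #define HDR_FEATURE_MASK   0xfu
    #define HDR_BIN_DUMP_SHIFT 28         /* bit 28: binary vs formatted */
    #define HDR_OMIT_ENG_SHIFT 29         /* bit 29: engine field unused */
    #define HDR_ENGINE_SHIFT   30         /* bit 30: engine number */

    static uint32_t calc_header(uint32_t size, uint32_t feature,
                                uint32_t bin_dump, uint32_t omit_engine,
                                uint32_t engine)
    {
            return (size & HDR_SIZE_MASK) |
                   ((feature & HDR_FEATURE_MASK) << HDR_FEATURE_SHIFT) |
                   ((bin_dump & 1u) << HDR_BIN_DUMP_SHIFT) |
                   ((omit_engine & 1u) << HDR_OMIT_ENG_SHIFT) |
                   ((engine & 1u) << HDR_ENGINE_SHIFT);
    }
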
@@ -7798,12 +8274,10 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- int grc_params[MAX_DBG_GRC_PARAMS], i;
+ int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
u32 offset = 0, feature_size;
- int rc;
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
grc_params[i] = dev_data->grc.param_val[i];
@@ -7811,8 +8285,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!QED_IS_CMT(cdev))
omit_engine = 1;
+ cdev->dbg_bin_dump = 1;
mutex_lock(&qed_dbg_lock);
- cdev->dbg_bin_dump = true;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7826,8 +8300,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7838,8 +8315,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7850,8 +8330,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, REG_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
@@ -7862,8 +8345,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IGU_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
@@ -7875,9 +8361,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
+ qed_calc_regdump_header(cdev,
+ PROTECTION_OVERRIDE,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev,
@@ -7891,8 +8380,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, FW_ASSERTS,
- cur_engine, feature_size,
- omit_engine);
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
@@ -7900,8 +8391,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
feature_size = qed_dbg_ilt_size(cdev);
- if (!cdev->disable_ilt_dump &&
- feature_size < ILT_DUMP_MAX_SIZE) {
+ if (!cdev->disable_ilt_dump && feature_size <
+ ILT_DUMP_MAX_SIZE) {
rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
@@ -7909,15 +8400,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
qed_calc_regdump_header(cdev, ILT_DUMP,
cur_engine,
feature_size,
- omit_engine);
- offset += feature_size + REGDUMP_HEADER_SIZE;
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
rc);
}
}
- /* GRC dump - must be last because when mcp stuck it will
+		/* GRC dump - must be last because when the MCP is stuck it will
* clutter idle_chk, reg_fifo, ...
*/
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
@@ -7929,7 +8421,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, GRC_DUMP,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
@@ -7944,16 +8438,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
- /* Re-populate nvm attribute info */
- qed_mcp_nvm_info_free(p_hwfn);
- qed_mcp_nvm_info_populate(p_hwfn);
-
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
(u8 *)buffer + offset +
@@ -7962,43 +8453,51 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
+ QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
+ rc);
}
- /* nvm default */
+ /* nvm default */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_DEFAULT_CFG);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, DEFAULT_CFG,
+ cur_engine, feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
- rc);
+ QED_NVM_IMAGE_DEFAULT_CFG,
+ "QED_NVM_IMAGE_DEFAULT_CFG", rc);
}
/* nvm meta */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_NVM_META);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_META);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, NVM_META, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
+ QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
+ rc);
}
/* nvm mdump */
@@ -8007,8 +8506,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, MDUMP, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
@@ -8016,17 +8516,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
}
- cdev->dbg_bin_dump = false;
mutex_unlock(&qed_dbg_lock);
+ cdev->dbg_bin_dump = 0;
return 0;
}
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
u8 cur_engine, org_engine;
cdev->disable_ilt_dump = false;
@@ -8037,14 +8536,13 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
"calculating idle_chk and grcdump register length for current engine\n");
qed_set_debug_engine(cdev, cur_engine);
regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE +
- qed_dbg_protection_override_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
-
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
if (ilt_len < ILT_DUMP_MAX_SIZE) {
total_ilt_len += ilt_len;
@@ -8055,7 +8553,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
qed_set_debug_engine(cdev, org_engine);
/* Engine common */
- regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
if (image_len)
regs_len += REGDUMP_HEADER_SIZE + image_len;
@@ -8083,10 +8582,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
- struct qed_dbg_feature *qed_feature =
- &cdev->dbg_features[feature];
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8119,9 +8616,8 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
enum dbg_status rc;
@@ -8143,6 +8639,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
return qed_feature->buf_size;
}
+int qed_dbg_phy_size(struct qed_dev *cdev)
+{
+	/* Return the max size of the PHY info buffer plus one PHY
+	 * mac_stat buffer per port.
+	 */
+ return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
+}
+
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
return cdev->engine_for_debug;
@@ -8160,6 +8664,9 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
const u8 *dbg_values = NULL;
int i;
+	/* Sync version with the qed debugbus code */
+ qed_dbg_set_app_ver(TOOLS_VERSION);
+
/* Debug values are after init values.
* The offset is the first dword of the file.
*/
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index e71af82d3200..b0d4b937cf4a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
-#ifndef _QED_DEBUGFS_H
-#define _QED_DEBUGFS_H
+#ifndef _QED_DEBUG_H
+#define _QED_DEBUG_H
enum qed_dbg_features {
DBG_FEATURE_GRC,
@@ -45,6 +45,7 @@ int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_phy_size(struct qed_dev *cdev);
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
int qed_dbg_all_data_size(struct qed_dev *cdev);
u8 qed_get_debug_engine(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0410c3604abd..18f3bf7c4dfe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -25,6 +25,7 @@
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -951,7 +952,7 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
}
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN])
+ u8 ppfid, const u8 mac_addr[ETH_ALEN])
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1396,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev)
qed_rdma_info_free(p_hwfn);
}
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
- qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
@@ -1483,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
/* num RLs can't exceed resource amount of rls or vports */
- num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
- RESC_NUM(p_hwfn, QED_VPORT));
+ num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
/* Make sure after we reserve there's something left */
if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
@@ -1532,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
bool four_port;
/* pq and vport bases for this PF */
- qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
/* rate limiting and weighted fair queueing are always enabled */
qm_info->vport_rl_en = true;
@@ -1628,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
*/
/* flags for pq init */
-#define PQ_INIT_SHARE_VPORT (1 << 0)
-#define PQ_INIT_PF_RL (1 << 1)
-#define PQ_INIT_VF_RL (1 << 2)
+#define PQ_INIT_SHARE_VPORT BIT(0)
+#define PQ_INIT_PF_RL BIT(1)
+#define PQ_INIT_VF_RL BIT(2)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
@@ -2290,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
}
- rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
if (rc)
goto alloc_err;
@@ -2375,6 +2377,49 @@ alloc_err:
return rc;
}
+static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ if (fw_return_code != COMMON_ERR_CODE_ERROR)
+ goto eqe_unexpected;
+
+ if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
+ le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
+ qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
+ return 0;
+ }
+
+eqe_unexpected:
+ DP_ERR(p_hwfn,
+ "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
+ opcode, fw_return_code, echo);
+ return -EINVAL;
+}
+
+static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ case COMMON_EVENT_VF_FLR:
+ return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
+ fw_return_code);
+ case COMMON_EVENT_FW_ERROR:
+ return qed_fw_err_handler(p_hwfn, opcode,
+ le16_to_cpu(echo), data,
+ fw_return_code);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
+ opcode, echo);
+ return -EINVAL;
+ }
+}
+
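
qed_common_eqe_event() fans PROTOCOLID_COMMON events out by opcode: the SR-IOV opcodes share one handler, FW errors get another, and anything else is rejected. A trimmed-down sketch of that dispatch shape (opcode values and handlers invented):

    #include <stdint.h>
    #include <stdio.h>

    enum { EV_VF_PF_CHANNEL = 1, EV_VF_FLR = 2, EV_FW_ERROR = 3 };

    static int handle_sriov(uint8_t op)  { printf("sriov op %u\n", op); return 0; }
    static int handle_fw_err(uint8_t op) { printf("fw error %u\n", op); return 0; }

    static int common_eqe_event(uint8_t opcode)
    {
            switch (opcode) {
            case EV_VF_PF_CHANNEL:
            case EV_VF_FLR:
                    return handle_sriov(opcode);
            case EV_FW_ERROR:
                    return handle_fw_err(opcode);
            default:
                    return -22;     /* unknown event: -EINVAL */
            }
    }
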
void qed_resc_setup(struct qed_dev *cdev)
{
int i;
@@ -2403,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_l2_setup(p_hwfn);
qed_iov_setup(p_hwfn);
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_common_eqe_event);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn);
@@ -2430,9 +2477,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
-
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id);
if (is_vf)
id += 0x10;
@@ -2592,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
cache_line_size);
}
- if (L1_CACHE_BYTES > wr_mbs)
+ if (wr_mbs < L1_CACHE_BYTES)
DP_INFO(p_hwfn,
"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
L1_CACHE_BYTES, wr_mbs);
@@ -2608,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- struct qed_qm_common_rt_init_params params;
+ struct qed_qm_common_rt_init_params *params;
struct qed_dev *cdev = p_hwfn->cdev;
u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to allocate common init params\n");
+
+ return -ENOMEM;
+ }
+
qed_init_cau_rt_data(cdev);
/* Program GTT windows */
@@ -2627,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qm_info->pf_wfq_en = true;
}
- memset(&params, 0, sizeof(params));
- params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
- params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
- params.pf_rl_en = qm_info->pf_rl_en;
- params.pf_wfq_en = qm_info->pf_wfq_en;
- params.global_rl_en = qm_info->vport_rl_en;
- params.vport_wfq_en = qm_info->vport_wfq_en;
- params.port_params = qm_info->qm_port_params;
+ params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params->pf_rl_en = qm_info->pf_rl_en;
+ params->pf_wfq_en = qm_info->pf_wfq_en;
+ params->global_rl_en = qm_info->vport_rl_en;
+ params->vport_wfq_en = qm_info->vport_wfq_en;
+ params->port_params = qm_info->qm_port_params;
- qed_qm_common_rt_init(p_hwfn, &params);
+ qed_qm_common_rt_init(p_hwfn, params);
qed_cxt_hw_init_common(p_hwfn);
@@ -2644,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
- return rc;
+ goto out;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
@@ -2663,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
- qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -2672,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+out:
+ kfree(params);
+
return rc;
}
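
The hunk above moves the large qed_qm_common_rt_init_params struct off the kernel stack onto the heap and reroutes the early return through a goto so the allocation is always freed. A sketch of the same allocate/goto-out shape (struct and failure path invented):

    #include <stdlib.h>

    struct big_params { char port_cfg[512]; int pf_rl_en; };

    static int run_init(const struct big_params *p)
    {
            return p->pf_rl_en ? 0 : -5;    /* stand-in for the HW init */
    }

    static int init_common(void)
    {
            struct big_params *params;
            int rc;

            params = calloc(1, sizeof(*params)); /* was a stack variable */
            if (!params)
                    return -12;                  /* -ENOMEM */

            params->pf_rl_en = 1;
            rc = run_init(params);
            if (rc)
                    goto out;                    /* was "return rc" */
            /* ... more init using params ... */
    out:
            free(params);
            return rc;
    }
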
@@ -2784,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt);
}
- p_hwfn->wid_count = (u16) n_cpus;
+ p_hwfn->wid_count = (u16)n_cpus;
DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
@@ -3503,8 +3559,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
- PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -3670,12 +3726,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
return qed_hsi_def_val[type][chip_id];
}
+
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
+
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
case QED_LL2_RAM_QUEUE:
@@ -3921,7 +3979,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
* resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not
- * supported - skip the the max values setting, release the lock if
+ * supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
*/
@@ -4775,7 +4833,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max;
- min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
@@ -4909,7 +4967,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
@@ -4948,7 +5006,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d3c1f3879be8..f8682356d0cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -15,44 +15,52 @@
#include "qed_int.h"
/**
- * @brief qed_init_dp - initialize the debug level
+ * qed_init_dp(): Initialize the debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * @cdev: Qed dev pointer.
+ * @dp_module: Module debug parameter.
+ * @dp_level: Module debug level.
+ *
+ * Return: Void.
*/
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
/**
- * @brief qed_init_struct - initialize the device structure to
- * its defaults
+ * qed_init_struct(): Initialize the device structure to
+ * its defaults.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_struct(struct qed_dev *cdev);
/**
- * @brief qed_resc_free -
+ * qed_resc_free(): Free device resources.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_resc_free(struct qed_dev *cdev);
/**
- * @brief qed_resc_alloc -
+ * qed_resc_alloc(): Alloc device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_resc_alloc(struct qed_dev *cdev);
/**
- * @brief qed_resc_setup -
+ * qed_resc_setup(): Setup device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
*/
void qed_resc_setup(struct qed_dev *cdev);
@@ -105,94 +113,96 @@ struct qed_hw_init_params {
};
/**
- * @brief qed_hw_init -
+ * qed_hw_init(): Init Qed hardware.
*
- * @param cdev
- * @param p_params
+ * @cdev: Qed dev pointer.
+ * @p_params: Pointer to params.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
- * @brief qed_hw_timers_stop_all - stop the timers HW block
+ * qed_hw_timers_stop_all(): Stop the timers HW block.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return void
+ * Return: Void.
*/
void qed_hw_timers_stop_all(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop -
+ * qed_hw_stop(): Stop Qed hardware.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_stop(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop_fastpath -should be called incase
- * slowpath is still required for the device,
- * but fastpath is not.
+ * qed_hw_stop_fastpath(): Should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
- * @brief qed_hw_start_fastpath -restart fastpath traffic,
- * only if hw_stop_fastpath was called
+ * qed_hw_start_fastpath(): Restart fastpath traffic,
+ * only if hw_stop_fastpath was called.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
/**
- * @brief qed_hw_prepare -
+ * qed_hw_prepare(): Prepare Qed hardware.
*
- * @param cdev
- * @param personality - personality to initialize
+ * @cdev: Qed dev pointer.
+ * @personality: Personality to initialize.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_prepare(struct qed_dev *cdev,
int personality);
/**
- * @brief qed_hw_remove -
+ * qed_hw_remove(): Remove Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_hw_remove(struct qed_dev *cdev);
/**
- * @brief qed_ptt_acquire - Allocate a PTT window
+ * qed_ptt_acquire(): Allocate a PTT window.
*
- * Should be called at the entry point to the driver (at the beginning of an
- * exported function)
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: struct qed_ptt.
*
- * @return struct qed_ptt
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function).
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_release - Release PTT Window
+ * qed_ptt_release(): Release PTT Window.
*
- * Should be called at the end of a flow - at the end of the function that
- * acquired the PTT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
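
The acquire/release contract above maps onto a simple usage pattern in exported entry points; a minimal sketch (do_hw_access() is hypothetical):

int example_exported_op(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);	/* at function entry */
	if (!p_ptt)
		return -EAGAIN;

	rc = do_hw_access(p_hwfn, p_ptt);	/* hypothetical body */

	qed_ptt_release(p_hwfn, p_ptt);		/* at end of the flow */
	return rc;
}
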
@@ -205,15 +215,17 @@ enum qed_dmae_address_type_t {
};
/**
- * @brief qed_dmae_host2grc - copy data from source addr to
- * dmae registers using the given ptt
+ * qed_dmae_host2grc(): Copy data from source addr to
+ * dmae registers using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param grc_addr (dmae_data_offset)
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
@@ -224,29 +236,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_dmae_params *p_params);
/**
- * @brief qed_dmae_grc2host - Read data from dmae data offset
- * to source address using the given ptt
+ * qed_dmae_grc2host(): Read data from dmae data offset
+ * to the destination address using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @dest_addr: Destination Address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_ptt
- * @param grc_addr (dmae_data_offset)
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
struct qed_dmae_params *p_params);
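
As the @p_params lines note, passing NULL selects default DMAE parameters. A hedged usage sketch that copies a DMA-mapped host buffer into GRC and reads it back:

static int dmae_roundtrip_sketch(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 dma_addr_t buf_phys, u32 grc_addr,
				 u32 size_in_dwords)
{
	int rc;

	/* NULL p_params requests the default DMAE parameters */
	rc = qed_dmae_host2grc(p_hwfn, p_ptt, buf_phys, grc_addr,
			       size_in_dwords, NULL);
	if (rc)
		return rc;

	return qed_dmae_grc2host(p_hwfn, p_ptt, grc_addr, buf_phys,
				 size_in_dwords, NULL);
}
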
/**
- * @brief qed_dmae_host2host - copy data from to source address
- * to a destination adress (for SRIOV) using the given ptt
+ * qed_dmae_host2host(): Copy data from a source address
+ * to a destination address (for SRIOV) using the given
+ * ptt.
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @dest_addr: Destination address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
+ *
+ * Return: Int.
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -259,51 +276,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
/**
- * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
+ * qed_fw_l2_queue(): Get absolute L2 queue ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id,
u16 *dst_id);
/**
- * @@brief qed_fw_vport - Get absolute vport ID
+ * qed_fw_vport(): Get absolute vport ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_vport(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @@brief qed_fw_rss_eng - Get absolute RSS engine ID
+ * qed_fw_rss_eng(): Get absolute RSS engine ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter
- * banks that are allocated to the PF.
+ * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter
+ * banks that are allocated to the PF.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return u8 - Number of LLH filter banks
+ * Return: u8 Number of LLH filter banks.
*/
u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
@@ -314,45 +331,50 @@ enum qed_eng {
};
/**
- * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given
- * LLH filter bank.
+ * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given
+ * LLH filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
u8 ppfid, enum qed_eng eng);
/**
- * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity
+ * qed_llh_set_roce_affinity(): Set the RoCE engine affinity.
*
- * @param cdev
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
/**
- * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter
- * bank.
+ * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to add.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param mac_addr - MAC to add
+ * Return: Int.
*/
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN]);
+ u8 ppfid, const u8 mac_addr[ETH_ALEN]);
/**
- * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given
- * filter bank.
+ * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given
+ * filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to remove.
*
- * @param p_ptt
- * @param p_filter - MAC to remove
+ * Return: Void.
*/
void qed_llh_remove_mac_filter(struct qed_dev *cdev,
u8 ppfid, u8 mac_addr[ETH_ALEN]);
@@ -368,15 +390,16 @@ enum qed_llh_prot_filter_type_t {
};
/**
- * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the
- * given filter bank.
+ * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
- * @param type - type of filters and comparing
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to add.
+ * @dest_port: Destination port to add.
+ *
+ * Return: Int.
*/
int
qed_llh_add_protocol_filter(struct qed_dev *cdev,
@@ -385,14 +408,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from
- * the given filter bank.
+ * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to remove.
+ * @dest_port: Destination port to remove.
+ *
+ * Return: Void.
*/
void
qed_llh_remove_protocol_filter(struct qed_dev *cdev,
@@ -401,31 +424,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
/**
- * *@brief Cleanup of previous driver remains prior to load
+ * qed_final_cleanup(): Cleanup of previous driver remains prior to load.
*
- * @param p_hwfn
- * @param p_ptt
- * @param id - For PF, engine-relative. For VF, PF-relative.
- * @param is_vf - true iff cleanup is made for a VF.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @id: For PF, engine-relative. For VF, PF-relative.
+ * @is_vf: True iff cleanup is made for a VF.
*
- * @return int
+ * Return: Int.
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
/**
- * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue.
*
- * @param p_hwfn
- * @param p_coal - store coalesce value read from the hardware.
- * @param p_handle
+ * @p_hwfn: HW device data.
+ * @coal: Store coalesce value read from the hardware.
+ * @handle: Queue handle.
*
- * @return int
+ * Return: Int.
**/
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
/**
- * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and
 * Tx queue. Coalescing can be configured up to 511 usec, but with
 * varying accuracy [the bigger the value the less accurate], up to an
 * error of 3 usec for the highest values.
@@ -433,37 +456,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
* should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
* otherwise configuration would break.
*
+ * @rx_coal: Rx Coalesce value in micro seconds.
+ * @tx_coal: TX Coalesce value in micro seconds.
+ * @p_handle: Queue handle.
*
- * @param rx_coal - Rx Coalesce value in micro seconds.
- * @param tx_coal - TX Coalesce value in micro seconds.
- * @param p_handle
- *
- * @return int
+ * Return: Int.
**/
int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
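
The range constraint spelled out above (both values within 0-0x7f, 0x80-0xff or 0x100-0x1ff) can be checked before calling; a hedged helper sketch:

/* Returns true iff rx and tx coalesce values land in the same timeset
 * range, per the constraint in the comment above.
 */
static bool coalesce_range_ok(u16 rx_coal, u16 tx_coal)
{
	static const u16 lo[] = { 0x000, 0x080, 0x100 };
	static const u16 hi[] = { 0x07f, 0x0ff, 0x1ff };
	int i;

	for (i = 0; i < 3; i++)
		if (rx_coal >= lo[i] && rx_coal <= hi[i])
			return tx_coal >= lo[i] && tx_coal <= hi[i];

	return false;	/* above 0x1ff: not configurable */
}
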
/**
- * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_enable - true/false
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_enable: True/False.
*
- * @return int
+ * Return: Int.
*/
int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * qed_db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_width - doorbell is 32b pr 64b
- * @param db_space - doorbell recovery addresses are user or kernel space
+ * Return: Int.
*/
int qed_db_recovery_add(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -472,17 +496,18 @@ int qed_db_recovery_add(struct qed_dev *cdev,
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * qed_db_recovery_del(): Remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
* entry to delete.
+ *
+ * Return: Int.
*/
int qed_db_recovery_del(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
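
Since db_data serves as the key (db_addr need not be unique), the add and delete calls must pair on the same db_data pointer. A hedged sketch, assuming the DB_REC_WIDTH_32B/DB_REC_KERNEL enum values from the qed headers:

/* Register a doorbell for recovery at setup... */
static int track_doorbell_sketch(struct qed_dev *cdev,
				 void __iomem *db_addr, u32 *db_data)
{
	return qed_db_recovery_add(cdev, db_addr, db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
}

/* ...and delete it at teardown with the same db_data pointer */
static void untrack_doorbell_sketch(struct qed_dev *cdev,
				    void __iomem *db_addr, u32 *db_data)
{
	qed_db_recovery_del(cdev, db_addr, db_data);
}
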
-
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 78070682f2df..6bb4e165b592 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -215,10 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
qdevlink = devlink_priv(dl);
qdevlink->cdev = cdev;
- rc = devlink_register(dl);
- if (rc)
- goto err_free;
-
rc = devlink_params_register(dl, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
if (rc)
@@ -229,17 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
QED_DEVLINK_PARAM_ID_IWARP_CMT,
value);
- devlink_params_publish(dl);
cdev->iwarp_cmt = false;
qed_fw_reporters_create(dl);
-
+ devlink_register(dl);
return dl;
err_unregister:
- devlink_unregister(dl);
-
-err_free:
devlink_free(dl);
return ERR_PTR(rc);
@@ -250,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink)
if (!devlink)
return;
+ devlink_unregister(devlink);
qed_fw_reporters_destroy(devlink);
devlink_params_unregister(devlink, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
- devlink_unregister(devlink);
devlink_free(devlink);
}
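
The reordering above tracks the devlink core convention that devlink_register() comes last, once the instance is fully populated, and devlink_unregister() comes first on teardown. Schematically (alloc_and_setup()/teardown() are placeholders):

struct devlink *register_sketch(struct qed_dev *cdev)
{
	struct devlink *dl = alloc_and_setup(cdev);	/* params, reporters */

	devlink_register(dl);	/* last: publishes the instance */
	return dl;
}

void unregister_sketch(struct devlink *dl)
{
	devlink_unregister(dl);	/* first: unpublish before teardown */
	teardown(dl);		/* reporters, params */
	devlink_free(dl);
}
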
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index b768f0698170..3764190b948e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -30,6 +30,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -89,7 +90,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
struct fcoe_init_ramrod_params *p_ramrod = NULL;
struct fcoe_init_func_ramrod_data *p_data;
- struct e4_fcoe_conn_context *p_cxt = NULL;
+ struct fcoe_conn_context *p_cxt = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_cxt_info cxt_info;
@@ -144,7 +145,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
memset(p_cxt, 0, sizeof(*p_cxt));
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
- E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+ TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -506,10 +507,9 @@ static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -521,10 +521,9 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -549,7 +548,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
- struct e4_fcoe_task_context *p_task_ctx = NULL;
+ struct fcoe_task_context *p_task_ctx = NULL;
u32 i, lc;
int rc;
@@ -561,7 +560,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
if (rc)
continue;
- memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
lc = 0;
SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
@@ -572,7 +571,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
- E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
}
}
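
The *_MASK/*_SHIFT pairs renamed throughout the qed_hsi.h hunk below are consumed by the SET_FIELD()/GET_FIELD() macros seen in the qed_fcoe.c hunk above. Roughly how the common HSI headers define them (a paraphrased sketch; see common_hsi.h for the exact form):

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((u64)(name##_MASK) << (name##_SHIFT));	\
		(value) |= ((u64)(flag)) << (name##_SHIFT);		\
	} while (0)
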
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index fb1baa2da2d0..f2cedbd9489c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _QED_HSI_H
@@ -38,7 +38,7 @@ enum common_event_opcode {
COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_VF_FLR,
COMMON_EVENT_PF_UPDATE,
- COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_FW_ERROR,
COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
@@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode {
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+/* LL2 SP error code */
+enum core_ll2_error_code {
+ LL2_OK = 0,
+ LL2_ERROR,
+ MAX_CORE_LL2_ERROR_CODE
+};
+
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
@@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+struct core_ll2_rx_per_queue_stat {
+ struct core_ll2_tstorm_per_queue_stat tstorm_stat;
+ struct core_ll2_ustorm_per_queue_stat ustorm_stat;
+};
+
+struct core_ll2_tx_per_queue_stat {
+ struct core_ll2_pstorm_per_queue_stat pstorm_stat;
+};
+
/* Structure for doorbell data, in PWM mode, for RX producers update. */
struct core_pwm_prod_update_data {
__le16 icid; /* internal CID */
@@ -135,6 +151,15 @@ struct core_pwm_prod_update_data {
struct core_ll2_rx_prod prod; /* Producers */
};
+/* Ramrod data for rx/tx queue statistics query ramrod */
+struct core_queue_stats_query_ramrod_data {
+ u8 rx_stat;
+ u8 tx_stat;
+ __le16 reserved[3];
+ struct regpair rx_stat_addr;
+ struct regpair tx_stat_addr;
+};
+
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
@@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe {
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
struct parsing_err_flags err_flags;
- __le16 reserved0;
+ u8 packet_source;
+ u8 reserved0;
__le32 reserved1[3];
};
@@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe {
__le16 qp_id;
__le32 src_qp;
struct core_rx_cqe_opaque_data opaque_data;
- __le32 reserved;
+ u8 packet_source;
+ u8 reserved[3];
};
/* Core RX CQE for Light L2 */
@@ -245,6 +272,15 @@ union core_rx_cqe_union {
struct core_rx_slow_path_cqe rx_cqe_sp;
};
+/* RX packet source. */
+enum core_rx_pkt_source {
+ CORE_RX_PKT_SOURCE_NETWORK = 0,
+ CORE_RX_PKT_SOURCE_LB,
+ CORE_RX_PKT_SOURCE_TX,
+ CORE_RX_PKT_SOURCE_LL2_TX,
+ MAX_CORE_RX_PKT_SOURCE
+};
+
/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
@@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data {
u8 update_qm_pq_id_flg;
u8 reserved0;
__le16 qm_pq_id;
- __le32 reserved1;
+ __le32 reserved1[1];
};
/* Enum flag for what type of dcb data to update */
@@ -386,224 +422,222 @@ struct pstorm_core_conn_st_ctx {
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo;
- __le32 spq_base_hi;
- struct regpair consolid_base_addr;
+ struct regpair spq_base_addr;
+ __le32 reserved0[2];
__le16 spq_cons;
- __le16 consolid_cons;
- __le32 reserved0[55];
+ __le16 reserved1[111];
};
-struct e4_xstorm_core_conn_ag_ctx {
+struct xstorm_core_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 consolid_prod;
@@ -657,89 +691,89 @@ struct e4_xstorm_core_conn_ag_ctx {
__le16 word15;
};
-struct e4_tstorm_core_conn_ag_ctx {
+struct tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -761,63 +795,63 @@ struct e4_tstorm_core_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_core_conn_ag_ctx {
+struct ustorm_core_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -846,15 +880,15 @@ struct tstorm_core_conn_st_ctx {
};
/* core connection context */
-struct e4_core_conn_context {
+struct core_conn_context {
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
@@ -930,12 +964,12 @@ struct eth_rx_rate_limit {
/* Update RSS indirection table entry command */
struct eth_tstorm_rss_update_data {
- u8 valid;
u8 vport_id;
u8 ind_table_index;
- u8 reserved;
__le16 ind_table_value;
__le16 reserved1;
+ u8 reserved;
+ u8 valid;
};
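
The reorder keeps the structure at 8 bytes; only the offsets implied by the declaration order change:

/*   offset 0: vport_id          offset 1: ind_table_index
 *   offset 2: ind_table_value   offset 4: reserved1
 *   offset 6: reserved          offset 7: valid
 *
 * Moving 'valid' to the last byte lets the writer fill in the payload
 * before flagging the entry as valid (an assumption about the
 * firmware's intent, not stated in the patch).
 */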
struct eth_ustorm_per_pf_stat {
@@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
-/* Event Ring malicious VF data */
-struct malicious_vf_eqe_data {
- u8 vf_id;
- u8 err_id;
- __le16 reserved[3];
-};
-
/* Event Ring initial cleanup data */
struct initial_cleanup_eqe_data {
u8 vf_id;
u8 reserved[7];
};
+/* FW error data */
+struct fw_err_data {
+ u8 recovery_scope;
+ u8 err_id;
+ __le16 entity_id;
+ u8 reserved[4];
+};
+
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
@@ -987,8 +1022,8 @@ union event_ring_data {
struct iscsi_eqe_data iscsi_info;
struct iscsi_connect_done_results iscsi_conn_done_info;
union rdma_eqe_data rdma_data;
- struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct fw_err_data err_data;
};
/* Event Ring Entry */
@@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct {
u8 major_ver_arr[2];
};
+/* Integration Phase */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3,
+ INTEG_PHASE_BB_B0_NO_MCP = 10,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11,
+ MAX_INTEG_PHASE
+};
+
+/* iWARP LL2 Tx queues */
enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
@@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues {
MAX_IWARP_LL2_TX_QUEUES
};
-/* Malicious VF error ID */
-enum malicious_vf_error_id {
- MALICIOUS_VF_NO_ERROR,
+/* Function error ID */
+enum func_err_id {
+ FUNC_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
VF_ZONE_MSG_NOT_VALID,
VF_ZONE_FUNC_NOT_ENABLED,
@@ -1087,13 +1131,33 @@ enum malicious_vf_error_id {
CORE_PACKET_SIZE_TOO_LARGE,
CORE_ILLEGAL_BD_FLAGS,
CORE_GSI_PACKET_VIOLATION,
- MAX_MALICIOUS_VF_ERROR_ID,
+ MAX_FUNC_ERR_ID
+};
+
+/* FW error handling mode */
+enum fw_err_mode {
+ FW_ERR_FATAL_ASSERT,
+ FW_ERR_DRV_REPORT,
+ MAX_FW_ERR_MODE
+};
+
+/* FW error recovery scope */
+enum fw_err_recovery_scope {
+ ERR_SCOPE_INVALID,
+ ERR_SCOPE_TX_Q,
+ ERR_SCOPE_RX_Q,
+ ERR_SCOPE_QP,
+ ERR_SCOPE_VPORT,
+ ERR_SCOPE_FUNC,
+ ERR_SCOPE_PORT,
+ ERR_SCOPE_ENGINE,
+ MAX_FW_ERR_RECOVERY_SCOPE
};
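
Together, fw_err_data, func_err_id and fw_err_recovery_scope generalize the old malicious-VF-only event into a generic FW error report. A hedged sketch of how an event-ring consumer might dispatch on the new payload; the handler and printouts are illustrative, not part of this patch:

static void example_handle_fw_err(union event_ring_data *data)
{
	struct fw_err_data *err = &data->err_data;

	switch (err->recovery_scope) {
	case ERR_SCOPE_TX_Q:
	case ERR_SCOPE_RX_Q:
		/* entity_id identifies the affected queue */
		pr_warn("FW error %u on queue %u\n", err->err_id,
			le16_to_cpu(err->entity_id));
		break;
	case ERR_SCOPE_FUNC:
		/* Whole-function scope; a heavier recovery may be needed */
		pr_warn("FW error %u on this function\n", err->err_id);
		break;
	default:
		break;
	}
}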
/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD];
};
/* Mstorm VF zone */
@@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config {
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
- struct regpair consolid_q_pbl_addr;
+ struct regpair consolid_q_pbl_base_addr;
struct pf_start_tunnel_config tunnel_config;
__le16 event_ring_sb_id;
u8 base_vf_id;
@@ -1166,6 +1230,9 @@ struct pf_start_ramrod_data {
u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
struct outer_tag_config_struct outer_tag_config;
+ u8 pf_fp_err_mode;
+ u8 consolid_q_num_pages;
+ u8 reserved[6];
};
/* Data for port update ramrod */
@@ -1230,6 +1297,13 @@ enum ports_mode {
MAX_PORTS_MODE
};
+/* Protocol-common error code */
+enum protocol_common_error_code {
+ COMMON_ERR_CODE_OK = 0,
+ COMMON_ERR_CODE_ERROR,
+ MAX_PROTOCOL_COMMON_ERROR_CODE
+};
+
/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
enum protocol_version_array_key {
ETH_VER_KEY = 0,
@@ -1525,74 +1599,74 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
-struct e4_mstorm_core_conn_ag_ctx {
+struct mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
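
All of these MASK/SHIFT pairs are consumed through the driver's generic bitfield accessors rather than open-coded shifts. A minimal sketch of the pattern, assuming the GET_FIELD()/SET_FIELD() helpers from the qed common HSI headers:

/* Illustrative only: read and update the 2-bit CF0 field of flags0
 * using the renamed (non-E4) defines above.
 */
static inline u8 example_get_cf0(const struct mstorm_core_conn_ag_ctx *ctx)
{
	return (ctx->flags0 >> MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT) &
	       MSTORM_CORE_CONN_AG_CTX_CF0_MASK;
}

static inline void example_set_cf0(struct mstorm_core_conn_ag_ctx *ctx, u8 val)
{
	ctx->flags0 &= ~(MSTORM_CORE_CONN_AG_CTX_CF0_MASK <<
			 MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT);
	ctx->flags0 |= (val & MSTORM_CORE_CONN_AG_CTX_CF0_MASK) <<
		       MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT;
}

With SET_FIELD() this collapses to SET_FIELD(ctx->flags0, MSTORM_CORE_CONN_AG_CTX_CF0, val), which is why dropping the E4_ prefix only has to touch the define names, not any call-site logic.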
-struct e4_ystorm_core_conn_ag_ctx {
+struct ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -1704,6 +1778,7 @@ struct igu_msix_vector {
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
+
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -1778,22 +1853,22 @@ struct qm_rf_opportunistic_mask {
};
/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map_e4 {
+struct qm_rf_pq_map {
__le32 reg;
-#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
-#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
-#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
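
The map register is composed field by field from the defines above. A minimal sketch, illustrative only (the driver itself goes through the generic SET_FIELD() helper):

static __le32 example_build_pq_map(u16 vp_pq_id, u8 rl_id, u8 voq,
				   bool rl_valid)
{
	u32 reg = 0;

	reg |= 1 << QM_RF_PQ_MAP_PQ_VALID_SHIFT;
	reg |= (rl_id & QM_RF_PQ_MAP_RL_ID_MASK) << QM_RF_PQ_MAP_RL_ID_SHIFT;
	reg |= (vp_pq_id & QM_RF_PQ_MAP_VP_PQ_ID_MASK) <<
	       QM_RF_PQ_MAP_VP_PQ_ID_SHIFT;
	reg |= (voq & QM_RF_PQ_MAP_VOQ_MASK) << QM_RF_PQ_MAP_VOQ_SHIFT;
	if (rl_valid)
		reg |= 1 << QM_RF_PQ_MAP_RL_VALID_SHIFT;

	return cpu_to_le32(reg);	/* stored into struct qm_rf_pq_map.reg */
}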
/* Completion params for aggregated interrupt completion */
@@ -1831,769 +1906,6 @@ struct virt_mem_desc {
u32 size; /* In bytes */
};
-/****************************************/
-/* Debug Tools HSI constants and macros */
-/****************************************/
-
-enum block_id {
- BLOCK_GRC,
- BLOCK_MISCS,
- BLOCK_MISC,
- BLOCK_DBU,
- BLOCK_PGLUE_B,
- BLOCK_CNIG,
- BLOCK_CPMU,
- BLOCK_NCSI,
- BLOCK_OPTE,
- BLOCK_BMB,
- BLOCK_PCIE,
- BLOCK_MCP,
- BLOCK_MCP2,
- BLOCK_PSWHST,
- BLOCK_PSWHST2,
- BLOCK_PSWRD,
- BLOCK_PSWRD2,
- BLOCK_PSWWR,
- BLOCK_PSWWR2,
- BLOCK_PSWRQ,
- BLOCK_PSWRQ2,
- BLOCK_PGLCS,
- BLOCK_DMAE,
- BLOCK_PTU,
- BLOCK_TCM,
- BLOCK_MCM,
- BLOCK_UCM,
- BLOCK_XCM,
- BLOCK_YCM,
- BLOCK_PCM,
- BLOCK_QM,
- BLOCK_TM,
- BLOCK_DORQ,
- BLOCK_BRB,
- BLOCK_SRC,
- BLOCK_PRS,
- BLOCK_TSDM,
- BLOCK_MSDM,
- BLOCK_USDM,
- BLOCK_XSDM,
- BLOCK_YSDM,
- BLOCK_PSDM,
- BLOCK_TSEM,
- BLOCK_MSEM,
- BLOCK_USEM,
- BLOCK_XSEM,
- BLOCK_YSEM,
- BLOCK_PSEM,
- BLOCK_RSS,
- BLOCK_TMLD,
- BLOCK_MULD,
- BLOCK_YULD,
- BLOCK_XYLD,
- BLOCK_PRM,
- BLOCK_PBF_PB1,
- BLOCK_PBF_PB2,
- BLOCK_RPB,
- BLOCK_BTB,
- BLOCK_PBF,
- BLOCK_RDIF,
- BLOCK_TDIF,
- BLOCK_CDU,
- BLOCK_CCFC,
- BLOCK_TCFC,
- BLOCK_IGU,
- BLOCK_CAU,
- BLOCK_UMAC,
- BLOCK_XMAC,
- BLOCK_MSTAT,
- BLOCK_DBG,
- BLOCK_NIG,
- BLOCK_WOL,
- BLOCK_BMBN,
- BLOCK_IPC,
- BLOCK_NWM,
- BLOCK_NWS,
- BLOCK_MS,
- BLOCK_PHY_PCIE,
- BLOCK_LED,
- BLOCK_AVS_WRAP,
- BLOCK_PXPREQBUS,
- BLOCK_BAR0_MAP,
- BLOCK_MCP_FIO,
- BLOCK_LAST_INIT,
- BLOCK_PRS_FC,
- BLOCK_PBF_FC,
- BLOCK_NIG_LB_FC,
- BLOCK_NIG_LB_FC_PLLH,
- BLOCK_NIG_TX_FC_PLLH,
- BLOCK_NIG_TX_FC,
- BLOCK_NIG_RX_FC_PLLH,
- BLOCK_NIG_RX_FC,
- MAX_BLOCK_ID
-};
-
-/* binary debug buffer types */
-enum bin_dbg_buffer_type {
- BIN_BUF_DBG_MODE_TREE,
- BIN_BUF_DBG_DUMP_REG,
- BIN_BUF_DBG_DUMP_MEM,
- BIN_BUF_DBG_IDLE_CHK_REGS,
- BIN_BUF_DBG_IDLE_CHK_IMMS,
- BIN_BUF_DBG_IDLE_CHK_RULES,
- BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
- BIN_BUF_DBG_ATTN_BLOCKS,
- BIN_BUF_DBG_ATTN_REGS,
- BIN_BUF_DBG_ATTN_INDEXES,
- BIN_BUF_DBG_ATTN_NAME_OFFSETS,
- BIN_BUF_DBG_BLOCKS,
- BIN_BUF_DBG_BLOCKS_CHIP_DATA,
- BIN_BUF_DBG_BUS_LINES,
- BIN_BUF_DBG_BLOCKS_USER_DATA,
- BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
- BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
- BIN_BUF_DBG_RESET_REGS,
- BIN_BUF_DBG_PARSING_STRINGS,
- MAX_BIN_DBG_BUFFER_TYPE
-};
-
-
-/* Attention bit mapping */
-struct dbg_attn_bit_mapping {
- u16 data;
-#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
-#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
-};
-
-/* Attention block per-type data */
-struct dbg_attn_block_type_data {
- u16 names_offset;
- u16 reserved1;
- u8 num_regs;
- u8 reserved2;
- u16 regs_offset;
-
-};
-
-/* Block attentions */
-struct dbg_attn_block {
- struct dbg_attn_block_type_data per_type_data[2];
-};
-
-/* Attention register result */
-struct dbg_attn_reg_result {
- u32 data;
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
- u16 block_attn_offset;
- u16 reserved;
- u32 sts_val;
- u32 mask_val;
-};
-
-/* Attention block result */
-struct dbg_attn_block_result {
- u8 block_id;
- u8 data;
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
- u16 names_offset;
- struct dbg_attn_reg_result reg_results[15];
-};
-
-/* Mode header */
-struct dbg_mode_hdr {
- u16 data;
-#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
-#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
-};
-
-/* Attention register */
-struct dbg_attn_reg {
- struct dbg_mode_hdr mode;
- u16 block_attn_offset;
- u32 data;
-#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
- u32 sts_clr_address;
- u32 mask_address;
-};
-
-/* Attention types */
-enum dbg_attn_type {
- ATTN_TYPE_INTERRUPT,
- ATTN_TYPE_PARITY,
- MAX_DBG_ATTN_TYPE
-};
-
-/* Block debug data */
-struct dbg_block {
- u8 name[15];
- u8 associated_storm_letter;
-};
-
-/* Chip-specific block debug data */
-struct dbg_block_chip {
- u8 flags;
-#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
-#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
-#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
-#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
- u8 dbg_client_id;
- u8 reset_reg_id;
- u8 reset_reg_bit_offset;
- struct dbg_mode_hdr dbg_bus_mode;
- u16 reserved1;
- u8 reserved2;
- u8 num_of_dbg_bus_lines;
- u16 dbg_bus_lines_offset;
- u32 dbg_select_reg_addr;
- u32 dbg_dword_enable_reg_addr;
- u32 dbg_shift_reg_addr;
- u32 dbg_force_valid_reg_addr;
- u32 dbg_force_frame_reg_addr;
-};
-
-/* Chip-specific block user debug data */
-struct dbg_block_chip_user {
- u8 num_of_dbg_bus_lines;
- u8 has_latency_events;
- u16 names_offset;
-};
-
-/* Block user debug data */
-struct dbg_block_user {
- u8 name[16];
-};
-
-/* Block Debug line data */
-struct dbg_bus_line {
- u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK 0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT 4
-#define DBG_BUS_LINE_RESERVED_MASK 0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT 5
- u8 group_sizes;
-};
-
-/* Condition header for registers dump */
-struct dbg_dump_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u8 block_id; /* block ID */
- u8 data_size; /* size in dwords of the data following this header */
-};
-
-/* Memory data for registers dump */
-struct dbg_dump_mem {
- u32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
- u32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
-#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT 25
-};
-
-/* Register data for registers dump */
-struct dbg_dump_reg {
- u32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
-};
-
-/* Split header for registers dump */
-struct dbg_dump_split_hdr {
- u32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
-};
-
-/* Condition header for idle check */
-struct dbg_idle_chk_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u16 data_size; /* size in dwords of the data following this header */
-};
-
-/* Idle Check condition register */
-struct dbg_idle_chk_cond_reg {
- u32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- u16 num_entries;
- u8 entry_size;
- u8 start_entry;
-};
-
-/* Idle Check info register */
-struct dbg_idle_chk_info_reg {
- u32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
- u16 size; /* register size in dwords */
- struct dbg_mode_hdr mode; /* Mode header */
-};
-
-/* Idle Check register */
-union dbg_idle_chk_reg {
- struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
- struct dbg_idle_chk_info_reg info_reg; /* info register */
-};
-
-/* Idle Check result header */
-struct dbg_idle_chk_result_hdr {
- u16 rule_id; /* Failing rule index */
- u16 mem_entry_id; /* Failing memory entry index */
- u8 num_dumped_cond_regs; /* number of dumped condition registers */
- u8 num_dumped_info_regs; /* number of dumped condition registers */
- u8 severity; /* from dbg_idle_chk_severity_types enum */
- u8 reserved;
-};
-
-/* Idle Check result register header */
-struct dbg_idle_chk_result_reg_hdr {
- u8 data;
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
- u8 start_entry; /* index of the first checked entry */
- u16 size; /* register size in dwords */
-};
-
-/* Idle Check rule */
-struct dbg_idle_chk_rule {
- u16 rule_id; /* Idle Check rule ID */
- u8 severity; /* value from dbg_idle_chk_severity_types enum */
- u8 cond_id; /* Condition ID */
- u8 num_cond_regs; /* number of condition registers */
- u8 num_info_regs; /* number of info registers */
- u8 num_imms; /* number of immediates in the condition */
- u8 reserved1;
- u16 reg_offset; /* offset of this rules registers in the idle check
- * register array (in dbg_idle_chk_reg units).
- */
- u16 imm_offset; /* offset of this rules immediate values in the
- * immediate values array (in dwords).
- */
-};
-
-/* Idle Check rule parsing data */
-struct dbg_idle_chk_rule_parsing_data {
- u32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
-};
-
-/* Idle check severity types */
-enum dbg_idle_chk_severity_types {
- /* idle check failure should cause an error */
- IDLE_CHK_SEVERITY_ERROR,
- /* idle check failure should cause an error only if theres no traffic */
- IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
- /* idle check failure should cause a warning */
- IDLE_CHK_SEVERITY_WARNING,
- MAX_DBG_IDLE_CHK_SEVERITY_TYPES
-};
-
-/* Reset register */
-struct dbg_reset_reg {
- u32 data;
-#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
-#define DBG_RESET_REG_ADDR_SHIFT 0
-#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
-#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
-#define DBG_RESET_REG_RESERVED_MASK 0x7F
-#define DBG_RESET_REG_RESERVED_SHIFT 25
-};
-
-/* Debug Bus block data */
-struct dbg_bus_block_data {
- u8 enable_mask;
- u8 right_shift;
- u8 force_valid_mask;
- u8 force_frame_mask;
- u8 dword_mask;
- u8 line_num;
- u8 hw_id;
- u8 flags;
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
-#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
-};
-
-enum dbg_bus_clients {
- DBG_BUS_CLIENT_RBCN,
- DBG_BUS_CLIENT_RBCP,
- DBG_BUS_CLIENT_RBCR,
- DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCF,
- DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_OTHER_ENGINE,
- DBG_BUS_CLIENT_TIMESTAMP,
- DBG_BUS_CLIENT_CPU,
- DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCQ,
- DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCV,
- MAX_DBG_BUS_CLIENTS
-};
-
-/* Debug Bus constraint operation types */
-enum dbg_bus_constraint_ops {
- DBG_BUS_CONSTRAINT_OP_EQ,
- DBG_BUS_CONSTRAINT_OP_NE,
- DBG_BUS_CONSTRAINT_OP_LT,
- DBG_BUS_CONSTRAINT_OP_LTC,
- DBG_BUS_CONSTRAINT_OP_LE,
- DBG_BUS_CONSTRAINT_OP_LEC,
- DBG_BUS_CONSTRAINT_OP_GT,
- DBG_BUS_CONSTRAINT_OP_GTC,
- DBG_BUS_CONSTRAINT_OP_GE,
- DBG_BUS_CONSTRAINT_OP_GEC,
- MAX_DBG_BUS_CONSTRAINT_OPS
-};
-
-/* Debug Bus trigger state data */
-struct dbg_bus_trigger_state_data {
- u8 msg_len;
- u8 constraint_dword_mask;
- u8 storm_id;
- u8 reserved;
-};
-
-/* Debug Bus memory address */
-struct dbg_bus_mem_addr {
- u32 lo;
- u32 hi;
-};
-
-/* Debug Bus PCI buffer data */
-struct dbg_bus_pci_buf_data {
- struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
- struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
- u32 size; /* PCI buffer size in bytes */
-};
-
-/* Debug Bus Storm EID range filter params */
-struct dbg_bus_storm_eid_range_params {
- u8 min; /* Minimal event ID to filter on */
- u8 max; /* Maximal event ID to filter on */
-};
-
-/* Debug Bus Storm EID mask filter params */
-struct dbg_bus_storm_eid_mask_params {
- u8 val; /* Event ID value */
- u8 mask; /* Event ID mask. 1s in the mask = dont care bits. */
-};
-
-/* Debug Bus Storm EID filter params */
-union dbg_bus_storm_eid_params {
- struct dbg_bus_storm_eid_range_params range;
- struct dbg_bus_storm_eid_mask_params mask;
-};
-
-/* Debug Bus Storm data */
-struct dbg_bus_storm_data {
- u8 enabled;
- u8 mode;
- u8 hw_id;
- u8 eid_filter_en;
- u8 eid_range_not_mask;
- u8 cid_filter_en;
- union dbg_bus_storm_eid_params eid_filter_params;
- u32 cid;
-};
-
-/* Debug Bus data */
-struct dbg_bus_data {
- u32 app_version;
- u8 state;
- u8 mode_256b_en;
- u8 num_enabled_blocks;
- u8 num_enabled_storms;
- u8 target;
- u8 one_shot_en;
- u8 grc_input_en;
- u8 timestamp_input_en;
- u8 filter_en;
- u8 adding_filter;
- u8 filter_pre_trigger;
- u8 filter_post_trigger;
- u8 trigger_en;
- u8 filter_constraint_dword_mask;
- u8 next_trigger_state;
- u8 next_constraint_id;
- struct dbg_bus_trigger_state_data trigger_states[3];
- u8 filter_msg_len;
- u8 rcv_from_other_engine;
- u8 blocks_dword_mask;
- u8 blocks_dword_overlap;
- u32 hw_id_mask;
- struct dbg_bus_pci_buf_data pci_buf;
- struct dbg_bus_block_data blocks[132];
- struct dbg_bus_storm_data storms[6];
-};
-
-/* Debug bus states */
-enum dbg_bus_states {
- DBG_BUS_STATE_IDLE,
- DBG_BUS_STATE_READY,
- DBG_BUS_STATE_RECORDING,
- DBG_BUS_STATE_STOPPED,
- MAX_DBG_BUS_STATES
-};
-
-/* Debug Bus Storm modes */
-enum dbg_bus_storm_modes {
- DBG_BUS_STORM_MODE_PRINTF,
- DBG_BUS_STORM_MODE_PRAM_ADDR,
- DBG_BUS_STORM_MODE_DRA_RW,
- DBG_BUS_STORM_MODE_DRA_W,
- DBG_BUS_STORM_MODE_LD_ST_ADDR,
- DBG_BUS_STORM_MODE_DRA_FSM,
- DBG_BUS_STORM_MODE_FAST_DBGMUX,
- DBG_BUS_STORM_MODE_RH,
- DBG_BUS_STORM_MODE_RH_WITH_STORE,
- DBG_BUS_STORM_MODE_FOC,
- DBG_BUS_STORM_MODE_EXT_STORE,
- MAX_DBG_BUS_STORM_MODES
-};
-
-/* Debug bus target IDs */
-enum dbg_bus_targets {
- DBG_BUS_TARGET_ID_INT_BUF,
- DBG_BUS_TARGET_ID_NIG,
- DBG_BUS_TARGET_ID_PCI,
- MAX_DBG_BUS_TARGETS
-};
-
-/* GRC Dump data */
-struct dbg_grc_data {
- u8 params_initialized;
- u8 reserved1;
- u16 reserved2;
- u32 param_val[48];
-};
-
-/* Debug GRC params */
-enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM,
- DBG_GRC_PARAM_DUMP_MSTORM,
- DBG_GRC_PARAM_DUMP_USTORM,
- DBG_GRC_PARAM_DUMP_XSTORM,
- DBG_GRC_PARAM_DUMP_YSTORM,
- DBG_GRC_PARAM_DUMP_PSTORM,
- DBG_GRC_PARAM_DUMP_REGS,
- DBG_GRC_PARAM_DUMP_RAM,
- DBG_GRC_PARAM_DUMP_PBUF,
- DBG_GRC_PARAM_DUMP_IOR,
- DBG_GRC_PARAM_DUMP_VFC,
- DBG_GRC_PARAM_DUMP_CM_CTX,
- DBG_GRC_PARAM_DUMP_PXP,
- DBG_GRC_PARAM_DUMP_RSS,
- DBG_GRC_PARAM_DUMP_CAU,
- DBG_GRC_PARAM_DUMP_QM,
- DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_DUMP_DORQ,
- DBG_GRC_PARAM_DUMP_CFC,
- DBG_GRC_PARAM_DUMP_IGU,
- DBG_GRC_PARAM_DUMP_BRB,
- DBG_GRC_PARAM_DUMP_BTB,
- DBG_GRC_PARAM_DUMP_BMB,
- DBG_GRC_PARAM_RESERVD1,
- DBG_GRC_PARAM_DUMP_MULD,
- DBG_GRC_PARAM_DUMP_PRS,
- DBG_GRC_PARAM_DUMP_DMAE,
- DBG_GRC_PARAM_DUMP_TM,
- DBG_GRC_PARAM_DUMP_SDM,
- DBG_GRC_PARAM_DUMP_DIF,
- DBG_GRC_PARAM_DUMP_STATIC,
- DBG_GRC_PARAM_UNSTALL,
- DBG_GRC_PARAM_RESERVED2,
- DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
- DBG_GRC_PARAM_EXCLUDE_ALL,
- DBG_GRC_PARAM_CRASH,
- DBG_GRC_PARAM_PARITY_SAFE,
- DBG_GRC_PARAM_DUMP_CM,
- DBG_GRC_PARAM_DUMP_PHY,
- DBG_GRC_PARAM_NO_MCP,
- DBG_GRC_PARAM_NO_FW_VER,
- DBG_GRC_PARAM_RESERVED3,
- DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
- DBG_GRC_PARAM_DUMP_ILT_CDUC,
- DBG_GRC_PARAM_DUMP_ILT_CDUT,
- DBG_GRC_PARAM_DUMP_CAU_EXT,
- MAX_DBG_GRC_PARAMS
-};
-
-/* Debug status codes */
-enum dbg_status {
- DBG_STATUS_OK,
- DBG_STATUS_APP_VERSION_NOT_SET,
- DBG_STATUS_UNSUPPORTED_APP_VERSION,
- DBG_STATUS_DBG_BLOCK_NOT_RESET,
- DBG_STATUS_INVALID_ARGS,
- DBG_STATUS_OUTPUT_ALREADY_SET,
- DBG_STATUS_INVALID_PCI_BUF_SIZE,
- DBG_STATUS_PCI_BUF_ALLOC_FAILED,
- DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
- DBG_STATUS_NO_MATCHING_FRAMING_MODE,
- DBG_STATUS_VFC_READ_ERROR,
- DBG_STATUS_STORM_ALREADY_ENABLED,
- DBG_STATUS_STORM_NOT_ENABLED,
- DBG_STATUS_BLOCK_ALREADY_ENABLED,
- DBG_STATUS_BLOCK_NOT_ENABLED,
- DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_256B,
- DBG_STATUS_FILTER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_NOT_ENABLED,
- DBG_STATUS_CANT_ADD_CONSTRAINT,
- DBG_STATUS_TOO_MANY_TRIGGER_STATES,
- DBG_STATUS_TOO_MANY_CONSTRAINTS,
- DBG_STATUS_RECORDING_NOT_STARTED,
- DBG_STATUS_DATA_DIDNT_TRIGGER,
- DBG_STATUS_NO_DATA_RECORDED,
- DBG_STATUS_DUMP_BUF_TOO_SMALL,
- DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
- DBG_STATUS_UNKNOWN_CHIP,
- DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
- DBG_STATUS_BLOCK_IN_RESET,
- DBG_STATUS_INVALID_TRACE_SIGNATURE,
- DBG_STATUS_INVALID_NVRAM_BUNDLE,
- DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
- DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
- DBG_STATUS_NVRAM_READ_FAILED,
- DBG_STATUS_IDLE_CHK_PARSE_FAILED,
- DBG_STATUS_MCP_TRACE_BAD_DATA,
- DBG_STATUS_MCP_TRACE_NO_META,
- DBG_STATUS_MCP_COULD_NOT_HALT,
- DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_RESERVED0,
- DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
- DBG_STATUS_IGU_FIFO_BAD_DATA,
- DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
- DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
- DBG_STATUS_REG_FIFO_BAD_DATA,
- DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
- DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_RESERVED1,
- DBG_STATUS_NON_MATCHING_LINES,
- DBG_STATUS_INSUFFICIENT_HW_IDS,
- DBG_STATUS_DBG_BUS_IN_USE,
- DBG_STATUS_INVALID_STORM_DBG_MODE,
- DBG_STATUS_OTHER_ENGINE_BB_ONLY,
- DBG_STATUS_FILTER_SINGLE_HW_ID,
- DBG_STATUS_TRIGGER_SINGLE_HW_ID,
- DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
- MAX_DBG_STATUS
-};
-
-/* Debug Storms IDs */
-enum dbg_storms {
- DBG_TSTORM_ID,
- DBG_MSTORM_ID,
- DBG_USTORM_ID,
- DBG_XSTORM_ID,
- DBG_YSTORM_ID,
- DBG_PSTORM_ID,
- MAX_DBG_STORMS
-};
-
-/* Idle Check data */
-struct idle_chk_data {
- u32 buf_size;
- u8 buf_size_set;
- u8 reserved1;
- u16 reserved2;
-};
-
-struct pretend_params {
- u8 split_type;
- u8 reserved;
- u16 split_id;
-};
-
-/* Debug Tools data (per HW function)
- */
-struct dbg_tools_data {
- struct dbg_grc_data grc;
- struct dbg_bus_data bus;
- struct idle_chk_data idle_chk;
- u8 mode_enable[40];
- u8 block_in_reset[132];
- u8 chip_id;
- u8 hw_type;
- u8 num_ports;
- u8 num_pfs_per_port;
- u8 num_vfs;
- u8 initialized;
- u8 use_dmae;
- u8 reserved;
- struct pretend_params pretend;
- u32 num_regs_read;
-};
-
-/* ILT Clients */
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_RGFS,
- ILT_CLI_TGFS,
- MAX_ILT_CLIENTS
-};
-
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -2644,6 +1956,9 @@ struct init_nig_pri_tc_map_req {
/* QM per global RL init parameters */
struct init_qm_global_rl_params {
+ u8 type;
+ u8 reserved0;
+ u16 reserved1;
u32 rate_limit;
};
@@ -2658,18 +1973,33 @@ struct init_qm_port_params {
/* QM per-PQ init parameters */
struct init_qm_pq_params {
- u8 vport_id;
+ u16 vport_id;
+ u16 rl_id;
+ u8 rl_valid;
u8 tc_id;
u8 wrr_group;
- u8 rl_valid;
- u16 rl_id;
u8 port_id;
- u8 reserved;
+};
+
+/* QM per RL init parameters */
+struct init_qm_rl_params {
+ u32 vport_rl;
+ u8 vport_rl_type;
+ u8 reserved[3];
+};
+
+/* QM Rate Limiter types */
+enum init_qm_rl_type {
+ QM_RL_TYPE_NORMAL,
+ QM_RL_TYPE_QCN,
+ MAX_INIT_QM_RL_TYPE
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
u16 wfq;
+ u16 reserved;
+ u16 tc_wfq[NUM_OF_TCS];
u16 first_tx_pq_id[NUM_OF_TCS];
};
@@ -2728,14 +2058,14 @@ struct fw_info_location {
};
enum init_modes {
- MODE_RESERVED,
+ MODE_BB_A0_DEPRECATED,
MODE_BB,
MODE_K2,
MODE_ASIC,
- MODE_RESERVED2,
- MODE_RESERVED3,
- MODE_RESERVED4,
- MODE_RESERVED5,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -2743,8 +2073,8 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_RESERVED6,
- MODE_RESERVED7,
+ MODE_SKIP_PRAM_INIT,
+ MODE_EMUL_MAC,
MAX_INIT_MODES
};
@@ -3009,706 +2339,6 @@ struct iro {
u16 size;
};
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
- * arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_read_regs - Reads registers into a buffer (using GRC).
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf - Destination buffer.
- * @param addr - Source GRC address in dwords.
- * @param len - Number of registers to read.
- */
-void qed_read_regs(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
-
-/**
- * @brief qed_read_fw_info - Reads FW info from the chip.
- *
- * The FW info contains FW-related information, such as the FW version,
- * FW image (main/L2B/kuku), FW timestamp, etc.
- * The FW info is read from the internal RAM of the first Storm that is not in
- * reset.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param fw_info - Out: a pointer to write the FW info into.
- *
- * @return true if the FW info was read successfully from one of the Storms,
- * or false if all Storms are in reset.
- */
-bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct fw_info *fw_info);
-/**
- * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
- *
- * @param p_hwfn - HW device data
- * @param grc_param - GRC parameter
- * @param val - Value to set.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - grc_param is invalid
- * - val is outside the allowed boundaries
- */
-enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param, u32 val);
-
-/**
- * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
- * default value.
- *
- * @param p_hwfn - HW device data
- */
-void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
-/**
- * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
- * GRC Dump.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the collected GRC data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified dump buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
- * for idle check results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the idle check
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the idle check data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
- * for mcp trace results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the mcp trace data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * - the trace meta data cannot be read (from NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
- * for grc trace fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the reg fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
- * for the IGU fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the IGU fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
- * buffer size for protection override window results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for protection
- * override data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status
-qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_protection_override_dump - Reads protection override window
- * entries and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the protection override data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-/**
- * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
- * size for FW Asserts results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the FW Asserts data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_read_attn - Reads the attention registers of the specified
- * block and type, and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param block - Block ID.
- * @param attn_type - Attention type.
- * @param clear_status - Indicates if the attention status should be cleared.
- * @param results - OUT: Pointer to write the read results into
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum block_id block,
- enum dbg_attn_type attn_type,
- bool clear_status,
- struct dbg_attn_block_result *results);
-
-/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the
- * specified results struct.
- *
- * @param p_hwfn
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
-/******************************* Data Types **********************************/
-
-struct mcp_trace_format {
- u32 data;
-#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
-#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
-#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
-#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
-#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
-#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_OFFSET 24
-
- char *format_str;
-};
-
-/* MCP Trace Meta data structure */
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
- bool is_allocated;
-};
-
-/* Debug Tools user data */
-struct dbg_tools_user_data {
- struct mcp_trace_meta mcp_trace_meta;
- const u32 *mcp_trace_user_meta_buf;
-};
-
-/******************************** Constants **********************************/
-
-#define MAX_NAME_LEN 16
-
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
- * debug arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_dbg_alloc_user_data - Allocates user debug data.
- *
- * @param p_hwfn - HW device data
- * @param user_data_ptr - OUT: a pointer to the allocated memory.
- */
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
- void **user_data_ptr);
-
-/**
- * @brief qed_dbg_get_status_str - Returns a string for the specified status.
- *
- * @param status - a debug status code.
- *
- * @return a string for the specified status
- */
-const char *qed_dbg_get_status_str(enum dbg_status status);
-
-/**
- * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
- * for idle check results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-/**
- * @brief qed_print_idle_chk_results - Prints idle check results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the idle check results.
- * @param num_errors - OUT: number of errors found in idle check.
- * @param num_warnings - OUT: number of warnings found in idle check.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *num_errors,
- u32 *num_warnings);
-
-/**
- * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
- *
- * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
- * no NVRAM access).
- *
- * @param data - pointer to MCP Trace meta data
- * @param size - size of MCP Trace meta data in dwords
- */
-void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
- const u32 *meta_buf);
-
-/**
- * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
- * for MCP Trace results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - MCP Trace dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_mcp_trace_results - Prints MCP Trace results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * @brief print_mcp_trace_line - Prints MCP Trace results for a single line
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_bytes - number of bytes that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
- * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data.
- * Should be called after continuous MCP Trace parsing.
- *
- * @param p_hwfn - HW device data
- */
-void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
- * for reg_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_reg_fifo_results - Prints reg fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the reg fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
- * for igu_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_igu_fifo_results - Prints IGU fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the IGU fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_protection_override_results_buf_size - Returns the required
- * buffer size for protection override results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status
-qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_protection_override_results - Prints protection override
- * results.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the reg fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
- * for FW Asserts results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_fw_asserts_results - Prints FW Asserts results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the FW Asserts results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_dbg_parse_attn - Parses and prints attention registers values in
- * the specified results struct.
- *
- * @param p_hwfn - HW device data
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3745,19 +2375,28 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
/* Win 13 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
+/* Returns the VOQ based on port and TC */
+#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \
+ PURE_LB_TC ? NUM_OF_PHYS_TCS *\
+ MAX_NUM_PORTS_BB + \
+ (port) : (port) * \
+ (max_phys_tcs_per_port) + (tc))
+
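A worked example of the mapping (constant values assumed from common_hsi.h, not from this hunk: NUM_OF_PHYS_TCS = 8, PURE_LB_TC = 8, MAX_NUM_PORTS_BB = 2):

/*   VOQ(1, 3, 4) = 1 * 4 + 3 = 7      physical TC 3 on port 1
 *   VOQ(1, 8, 4) = 8 * 2 + 1 = 17     pure-LB TC on port 1
 *
 * Physical VOQs occupy [0, num_ports * max_phys_tcs_per_port) and the
 * per-port pure-loopback VOQs are appended after all of them.
 */
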
+struct init_qm_pq_params;
+
/**
- * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ * qed_qm_pf_mem_size(): Prepare QM ILT sizes.
*
- * Returns the required host memory size in 4KB units.
- * Must be called before all QM init HSI functions.
+ * @num_pf_cids: Number of connections used by this PF.
+ * @num_vf_cids: Number of connections used by VFs of this PF.
+ * @num_tids: Number of tasks used by this PF.
+ * @num_pf_pqs: Number of PQs used by this PF.
+ * @num_vf_pqs: Number of PQs used by VFs of this PF.
*
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * Return: The required host memory size in 4KB units.
*
- * @return The required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
*/
u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
@@ -3771,8 +2410,19 @@ struct qed_qm_common_rt_init_params {
bool global_rl_en;
bool vport_wfq_en;
struct init_qm_port_params *port_params;
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS];
};
+/**
+ * qed_qm_common_rt_init(): Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params);
@@ -3789,85 +2439,116 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u16 start_vport;
u16 num_vports;
+ u16 start_rl;
+ u16 num_rls;
u16 pf_wfq;
u32 pf_rl;
+ u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
+ struct init_qm_rl_params *rl_params;
};
+/**
+ * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
/**
- * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
+ * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
- * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ * qed_init_pf_rl(): Initializes the rate limit of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_rl: Rate limit in Mb/sec units.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
/**
- * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
- * with the VPORT for each TC. This array is filled by
- * qed_qm_pf_rt_init
- * @param vport_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * qed_qm_pf_rt_init.
+ * @wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
/**
- * @brief qed_init_global_rl - Initializes the rate limit of the specified
- * rate limiter
+ * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified
+ * VPORT and TC.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param rl_id - RL ID
- * @param rate_limit - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC.
+ * (filled by qed_qm_pf_rt_init).
+ * @weight: VPORT+TC WFQ weight.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 weight);
+
+/**
+ * qed_init_global_rl(): Initializes the rate limit of the specified
+ * rate limiter.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @rl_id: RL ID.
+ * @rate_limit: Rate limit in Mb/sec units.
+ * @vport_rl_type: Vport RL type.
+ *
+ * Return: 0 on success, -1 on error.
*/
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 rl_id, u32 rate_limit);
+ u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type);
/**
- * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
+ * qed_send_qm_stop_cmd(): Sends a stop command to the QM.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @is_release_cmd: true for release, false for stop.
+ * @is_tx_pq: true for Tx PQs, false for Other PQs.
+ * @start_pq: First PQ ID to stop.
+ * @num_pqs: Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while waiting for
- * QM command done.
+ * Return: Bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
*/
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3875,53 +2556,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs);
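
Because qed_send_qm_stop_cmd() reports a timeout through its boolean return, a caller would typically issue a stop followed by a release over the same PQ range and bail out if either wait fails. An illustrative (not self-compiling) sequence under that reading, with p_hwfn, p_ptt, start_pq and num_pqs supplied by the surrounding driver code:

	/* Sketch: quiesce, then release, a range of Tx PQs. */
	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false /* stop */,
				  true /* Tx PQs */, start_pq, num_pqs))
		return -EBUSY;	/* timed out waiting for QM command done */

	/* ... reconfigure queues ... */

	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, true /* release */,
				  true /* Tx PQs */, start_pq, num_pqs))
		return -EBUSY;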
/**
- * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - vxlan destination udp port.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: vxlan destination udp port.
+ *
+ * Return: Void.
*/
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @vxlan_enable: vxlan enable flag.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * Return: Void.
*/
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable);
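
The two VXLAN helpers are naturally paired at tunnel bring-up: program the UDP destination port first, then turn on classification. A short sketch, using the IANA-assigned VXLAN port as the example value:

	/* Sketch: enable VXLAN RX classification on UDP port 4789. */
	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
	qed_set_vxlan_enable(p_hwfn, p_ptt, true);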
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_gre_enable(): Enable or disable GRE tunnel in HW.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_gre_enable - eth GRE enable enable flag.
- * @param ip_gre_enable - IP GRE enable enable flag.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_gre_enable: Eth GRE enable flag.
+ * @ip_gre_enable: IP GRE enable flag.
+ *
+ * Return: Void.
*/
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable);
/**
- * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: Geneve destination udp port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - geneve destination udp port.
+ * Return: Void.
*/
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_geneve_enable: Eth GENEVE enable flag.
+ * @ip_geneve_enable: IP GENEVE enable flag.
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_geneve_enable - eth GENEVE enable enable flag.
- * @param ip_geneve_enable - IP GENEVE enable enable flag.
+ * Return: Void.
*/
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3931,25 +2623,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable);
/**
- * @brief qed_gft_disable - Disable GFT
+ * qed_gft_disable(): Disable GFT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to disable GFT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to disable GFT.
+ *
+ * Return: Void.
*/
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
/**
- * @brief qed_gft_config - Enable and configure HW for GFT
+ * qed_gft_config(): Enable and configure HW for GFT.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to enable GFT.
+ * @tcp: Set profile tcp packets.
+ * @udp: Set profile udp packet.
+ * @ipv4: Set profile ipv4 packet.
+ * @ipv6: Set profile ipv6 packet.
+ * @profile_type: Defines which packet fields to match on. Use enum
+ * gft_profile_type.
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to enable GFT.
- * @param tcp - set profile tcp packets.
- * @param udp - set profile udp packet.
- * @param ipv4 - set profile ipv4 packet.
- * @param ipv6 - set profile ipv6 packet.
- * @param profile_type - define packet same fields. Use enum gft_profile_type.
+ * Return: Void.
*/
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3959,438 +2655,135 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool ipv4, bool ipv6, enum gft_profile_type profile_type);
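
As a usage sketch, enabling GFT for TCP-over-IPv4 flows with full 4-tuple matching could look as follows; GFT_PROFILE_TYPE_4_TUPLE and the rel_pf_id field are assumed here to exist as in current qed sources:

	/* Sketch: steer by the full 4-tuple for TCP/IPv4 flows on this PF. */
	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
		       true,	/* tcp */
		       false,	/* udp */
		       true,	/* ipv4 */
		       false,	/* ipv6 */
		       GFT_PROFILE_TYPE_4_TUPLE);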
/**
- * @brief qed_enable_context_validation - Enable and configure context
- * validation.
+ * qed_enable_context_validation(): Enable and configure context
+ * validation.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
+ * Return: Void.
*/
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_calc_session_ctx_validation - Calcualte validation byte for
- * session context.
+ * qed_calc_session_ctx_validation(): Calculate validation byte for
+ * session context.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param cid - context cid.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @cid: Context cid.
+ *
+ * Return: Void.
*/
void qed_calc_session_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 cid);
/**
- * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task
- * context.
+ * qed_calc_task_ctx_validation(): Calculate validation byte for task
+ * context.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @tid: Context tid.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param tid - context tid.
+ * Return: Void.
*/
void qed_calc_task_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 tid);
/**
- * @brief qed_memset_session_ctx - Memset session context to 0 while
- * preserving validation bytes.
+ * qed_memset_session_ctx(): Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
*
- * @param p_hwfn -
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * Return: Void.
*/
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
/**
- * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
- * validation bytes.
+ * qed_memset_task_ctx(): Memset task context to 0 while preserving
+ * validation bytes.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
+ *
+ * Return: Void.
*/
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
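
The four context helpers above compose into one lifecycle: enable validation once per device, stamp each context with its validation byte, and use the memset variants whenever a context must be cleared without losing that stamp. An illustrative ordering (identifiers taken from the declarations above, surrounding setup omitted):

	qed_enable_context_validation(p_hwfn, p_ptt);

	/* Session context, keyed by CID. */
	qed_calc_session_ctx_validation(p_ctx, ctx_size, ctx_type, cid);
	qed_memset_session_ctx(p_ctx, ctx_size, ctx_type);

	/* Task context follows the same pattern, keyed by TID. */
	qed_calc_task_ctx_validation(p_ctx, ctx_size, ctx_type, tid);
	qed_memset_task_ctx(p_ctx, ctx_size, ctx_type);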
#define NUM_STORMS 6
/**
- * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
- * If the severity of the error will be
- * above the level, the FW will assert.
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers
- * @param assert_level - An array of assert levels for each storm.
+ * qed_set_rdma_error_level(): Sets the RDMA assert level.
+ * If the severity of the error exceeds
+ * this level, the FW asserts.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @assert_level: An array of assert levels for each storm.
*
+ * Return: Void.
*/
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
/**
- * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
+ * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory.
*
- * @param p_hwfn - HW device data
- * @param fw_overlay_in_buf - the input FW overlay buffer.
- * @param buf_size - the size of the input FW overlay buffer in bytes.
- * must be aligned to dwords.
- * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_in_buf: The input FW overlay buffer.
+ * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes.
+ * Must be aligned to dwords.
*
- * @return a pointer to the allocated overlays memory,
+ * Return: A pointer to the allocated overlays memory,
* or NULL in case of failures.
*/
struct phys_mem_desc *
qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
- const u32 * const fw_overlay_in_buf,
+ const u32 *const fw_overlay_in_buf,
u32 buf_size_in_bytes);
/**
- * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
+ * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_overlay_mem: The allocated FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param p_ptt - ptt window used for writing the registers.
- * @param fw_overlay_mem - the allocated FW overlay memory.
+ * Return: Void.
*/
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct phys_mem_desc *fw_overlay_mem);
/**
- * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
+ * qed_fw_overlay_mem_free(): Frees the FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_mem: The allocated FW overlay memory to free.
+ *
+ * Return: Void.
*/
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem);
+ struct phys_mem_desc **fw_overlay_mem);
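
The three overlay routines form an alloc/init/free lifecycle, and the signature change in this hunk (struct phys_mem_desc ** for the free) suggests the free path now also clears the caller's pointer. A hedged call-sequence sketch under that assumption, with p_hwfn, p_ptt, fw_overlay_buf and buf_bytes provided by the surrounding driver code:

	struct phys_mem_desc *overlays;

	overlays = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_buf, buf_bytes);
	if (!overlays)
		return -ENOMEM;

	qed_fw_overlay_init_ram(p_hwfn, p_ptt, overlays);

	/* ... runtime ... */

	qed_fw_overlay_mem_free(p_hwfn, &overlays); /* presumably NULLs 'overlays' */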
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
- (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
- (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
- (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-
-/* Ustorm eth queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
- (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
-
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
- (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
-
-/* Xstorm common PQ info */
-#define XSTORM_PQ_INFO_OFFSET(pq_id) \
- (IRO[8].base + ((pq_id) * IRO[8].m1))
-#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
-
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
-
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
-
-/* Xstorm overlay buffer host address */
-#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
-#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
-
-/* Ystorm overlay buffer host address */
-#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
-#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
-
-/* Pstorm overlay buffer host address */
-#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
-#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
-
-/* Tstorm overlay buffer host address */
-#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
-#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
-
-/* Mstorm overlay buffer host address */
-#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
-#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
-
-/* Ustorm overlay buffer host address */
-#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
-#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
-
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
-
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
-
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
-
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
-
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
-
-/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode
- */
-#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
-
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
- (IRO[28].base + ((queue_id) * IRO[28].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
-
-/* Mstorm pf statistics */
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
-
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
-
-/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[31].base + ((pf_id) * IRO[31].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
-
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
-
-/* Pstorm pf statistics */
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
-
-/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
- (IRO[34].base + ((eth_type_id) * IRO[34].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
-
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
-
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
-
-/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update
- */
-#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
-
-/* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[38].base + ((queue_id) * IRO[38].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
-
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[39].base + ((rss_id) * IRO[39].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
-
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[40].base + ((rss_id) * IRO[40].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
-
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
-
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
-
-/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id
+#define PCICFG_OFFSET 0x2000
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on Total Number of VFs programmed
+ * for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
- ((bdq_id) * IRO[43].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
-
-/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
- ((bdq_id) * IRO[44].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
-
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[45].base + ((storage_func_id) * IRO[45].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
-
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[46].base + ((storage_func_id) * IRO[46].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
-
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[47].base + ((storage_func_id) * IRO[47].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
-
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[48].base + ((storage_func_id) * IRO[48].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
-
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[49].base + ((storage_func_id) * IRO[49].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
-
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[50].base + ((storage_func_id) * IRO[50].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
-
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
-
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
-
-/* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
-
-/* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
-
-/* Xstorm error level for assert */
-#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[55].base + ((pf_id) * IRO[55].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
-
-/* Ystorm error level for assert */
-#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[56].base + ((pf_id) * IRO[56].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
-
-/* Pstorm error level for assert */
-#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[57].base + ((pf_id) * IRO[57].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
-
-/* Tstorm error level for assert */
-#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[58].base + ((pf_id) * IRO[58].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
-
-/* Mstorm error level for assert */
-#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[59].base + ((pf_id) * IRO[59].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
-
-/* Ustorm error level for assert */
-#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[60].base + ((pf_id) * IRO[60].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
-
-/* Xstorm iWARP rxmit stats */
-#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[61].base + ((pf_id) * IRO[61].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
-
-/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[62].base + ((roce_pf_id) * IRO[62].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
-
-/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\
- (IRO[63].base + ((roce_pf_id) * IRO[63].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
-
-/* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[64].base + ((roce_pf_id) * IRO[64].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
-
-/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[65].base + ((roce_pf_id) * IRO[65].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
-
-/* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff
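
Reading back the first VF number for a PF would then mask the low byte of this register; a sketch using qed's qed_rd() accessor (that this GRC address is reachable through it is an assumption here):

	/* Sketch: recover the first VF number encoded for this PF. */
	u32 val = qed_rd(p_hwfn, p_ptt,
			 PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	u8 first_vf = val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK;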
/* Runtime array offsets */
#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
@@ -4721,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32580
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 32580
+#define QM_REG_WFQVPMAP_RT_OFFSET 33092
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_PTRTBLTX_RT_OFFSET 33092
+#define QM_REG_PTRTBLTX_RT_OFFSET 33604
#define QM_REG_PTRTBLTX_RT_SIZE 1024
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471
-
-#define RUNTIME_ARRAY_SIZE 34472
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34983
+
+#define RUNTIME_ARRAY_SIZE 34984
/* Init Callbacks */
#define DMAE_READY_CB 0
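
The hunk below strips the E4_ prefix from the per-Storm aggregative-context bitfield macros. Each _MASK/_SHIFT pair is intended for token-pasting accessors in the style of the GET_FIELD()/SET_FIELD() helpers found elsewhere in the qed headers; a standalone sketch of that pattern, with the helpers re-derived here on the assumption they match the driver's:

	#include <stdio.h>

	typedef unsigned char u8;

	#define GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)
	#define SET_FIELD(value, name, flag)				   \
		do {							   \
			(value) &= ~(name##_MASK << name##_SHIFT);	   \
			(value) |= ((flag) & name##_MASK) << name##_SHIFT; \
		} while (0)

	#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
	#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3

	int main(void)
	{
		u8 flags0 = 0;

		/* Set bit 3 of flags0, then read it back. */
		SET_FIELD(flags0, XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3, 1);
		printf("flags0=0x%02x bit=%d\n", flags0,
		       (int)GET_FIELD(flags0,
				      XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3));
		return 0;
	}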
@@ -4850,216 +3245,216 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct e4_xstorm_eth_conn_ag_ctx {
+struct xstorm_eth_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -5118,37 +3513,37 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct e4_ystorm_eth_conn_ag_ctx {
+struct ystorm_eth_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset;
u8 byte3;
__le16 word0;
@@ -5162,89 +3557,89 @@ struct e4_ystorm_eth_conn_ag_ctx {
__le32 reg3;
};
-struct e4_tstorm_eth_conn_ag_ctx {
+struct tstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -5266,63 +3661,63 @@ struct e4_tstorm_eth_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_eth_conn_ag_ctx {
+struct ustorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -5346,16 +3741,16 @@ struct mstorm_eth_conn_st_ctx {
};
/* eth connection context */
-struct e4_eth_conn_context {
+struct eth_conn_context {
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
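
Every _MASK/_SHIFT pair above is consumed through the driver's token-pasting field accessors (GET_FIELD()/SET_FIELD(), whose shapes follow include/linux/qed/common_hsi.h), which is why the E4_ prefix has to be dropped from both halves of every pair, and from all callers, in lockstep. A minimal sketch of the pattern:

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((name##_MASK) << (name##_SHIFT)); \
		(value) |= (((u64)(flag)) & (u64)(name##_MASK)) << (name##_SHIFT); \
	} while (0)

/* e.g. enable TPH on the Xstorm aggregative context after the rename: */
SET_FIELD(ag_ctx->flags14, XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE, 1);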
@@ -5512,7 +3907,7 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_ADD_UDP_FILTER,
ETH_RAMROD_RX_DELETE_UDP_FILTER,
ETH_RAMROD_RX_CREATE_GFT_ACTION,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
ETH_RAMROD_TX_QUEUE_UPDATE,
ETH_RAMROD_RGFS_FILTER_ADD,
ETH_RAMROD_RGFS_FILTER_DEL,
@@ -5596,10 +3991,12 @@ struct eth_vport_rss_config {
u8 update_rss_ind_table;
u8 update_rss_capabilities;
u8 tbl_size;
- __le32 reserved2[2];
+ u8 ind_table_mask_valid;
+ u8 reserved2[3];
__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS];
__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
+ __le32 reserved3;
};
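
The reworked eth_vport_rss_config trades two reserved dwords for a per-entry validity mask over the indirection table. A hedged sketch of populating it, assuming ind_table_mask packs one valid bit per indirection-table entry (a layout this diff does not spell out):

/* Hypothetical helper: mark indirection-table entry 'idx' as valid.
 * Bit-per-entry packing across the __le32 mask registers is assumed.
 */
static void rss_ind_entry_set_valid(struct eth_vport_rss_config *cfg, u16 idx)
{
	cfg->ind_table_mask[idx / 32] |= cpu_to_le32(BIT(idx % 32));
	cfg->ind_table_mask_valid = 1;
}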
/* eth vport RSS mode */
@@ -5674,8 +4071,20 @@ enum gft_filter_update_action {
MAX_GFT_FILTER_UPDATE_ACTION
};
+/* Ramrod data for rx create gft action */
+struct rx_create_gft_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/* Ramrod data for rx create openflow action */
+struct rx_create_openflow_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
/* Ramrod data for rx add openflow filter */
-struct rx_add_openflow_filter_data {
+struct rx_openflow_filter_ramrod_data {
__le16 action_icid;
u8 priority;
u8 reserved0;
@@ -5698,18 +4107,6 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port;
};
-/* Ramrod data for rx create gft action */
-struct rx_create_gft_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
-/* Ramrod data for rx create openflow action */
-struct rx_create_openflow_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -5768,7 +4165,7 @@ struct rx_queue_update_ramrod_data {
};
/* Ramrod data for rx add UDP filter */
-struct rx_udp_filter_data {
+struct rx_udp_filter_ramrod_data {
__le16 action_icid;
__le16 vlan_id;
u8 ip_type;
@@ -5784,7 +4181,7 @@ struct rx_udp_filter_data {
/* Add or delete a GFT filter - the filter is a packet header of the
 * packet type wished to pass to a certain FW flow.
 */
-struct rx_update_gft_filter_data {
+struct rx_update_gft_filter_ramrod_data {
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length;
__le16 action_icid;
@@ -5824,7 +4221,8 @@ struct tx_queue_start_ramrod_data {
u8 pxp_tph_valid_bd;
u8 pxp_tph_valid_pkt;
__le16 pxp_st_index;
- __le16 comp_agg_size;
+ u8 comp_agg_size;
+ u8 reserved3;
__le16 queue_zone_id;
__le16 reserved2;
__le16 pbl_size;
@@ -5945,7 +4343,12 @@ struct vport_update_ramrod_data_cmn {
u8 ctl_frame_ethtype_check_en;
u8 update_in_to_in_pri_map_mode;
u8 in_to_in_pri_map[8];
- u8 reserved[6];
+ u8 update_tx_dst_port_mode_flg;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
+ u8 reserved[1];
};
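
The new Tx destination-port fields follow the ramrod's usual update-flag convention, where an update_*_flg byte gates whether firmware consumes the accompanying values. A hedged sketch of a caller, with 'mode' left symbolic because the tx_dst_port_mode enum is not part of this hunk:

/* Hypothetical use of the new fields; p_cmn points at the
 * vport_update_ramrod_data_cmn being prepared.
 */
p_cmn->update_tx_dst_port_mode_flg = 1;
p_cmn->tx_dst_port_mode = mode;		/* value from an enum not shown here */
p_cmn->dst_vport_id = dst_vport;
p_cmn->dst_vport_id_valid = 1;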
struct vport_update_ramrod_mcast {
@@ -5964,7 +4367,7 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
+struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
u8 reserved0;
u8 state;
u8 flags0;
@@ -6193,253 +4596,253 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
__le32 reg4;
};
-struct e4_mstorm_eth_conn_ag_ctx {
+struct mstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_xstorm_eth_hw_conn_ag_ctx {
+struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -6479,7 +4882,6 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
-
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
@@ -6640,49 +5042,49 @@ struct ystorm_rdma_task_st_ctx {
struct regpair temp[4];
};
-struct e4_ystorm_rdma_task_ag_ctx {
+struct ystorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 msem_ctx_upd_seq;
u8 flags0;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6696,49 +5098,49 @@ struct e4_ystorm_rdma_task_ag_ctx {
__le32 fbo_hi;
};
-struct e4_mstorm_rdma_task_ag_ctx {
+struct mstorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6762,56 +5164,56 @@ struct ustorm_rdma_task_st_ctx {
struct regpair temp[6];
};
-struct e4_ustorm_rdma_task_ag_ctx {
+struct ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 dif_rxmit_cons;
@@ -6828,16 +5230,853 @@ struct e4_ustorm_rdma_task_ag_ctx {
};
/* RDMA task context */
-struct e4_rdma_task_context {
+struct rdma_task_context {
struct ystorm_rdma_task_st_ctx ystorm_st_context;
- struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
struct tdif_task_context tdif_context;
- struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
struct ustorm_rdma_task_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
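Illustrative sketch (not part of the patch): every *_MASK/*_SHIFT pair in
this file is consumed through the qed driver's field accessors. A minimal
stand-alone rendition of that pattern, assuming the helpers keep their
usual qed definitions (the function below is hypothetical):

#include <linux/types.h>

/* Assumed to mirror the driver's GET_FIELD()/SET_FIELD() helpers */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)
#define SET_FIELD(value, name, flag)				\
	do {							\
		(value) &= ~(name##_MASK << name##_SHIFT);	\
		(value) |= (((u64)(flag)) << (name##_SHIFT));	\
	} while (0)

static void ustorm_task_flags_example(struct ustorm_rdma_task_ag_ctx *ctx)
{
	/* connection type occupies bits 0..3 of flags0 (mask 0xF, shift 0) */
	u8 conn_type = GET_FIELD(ctx->flags0,
				 USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE);

	/* arm rule 0, bit 5 of flags2 */
	SET_FIELD(ctx->flags2, USTORM_RDMA_TASK_AG_CTX_RULE0EN, 1);
	(void)conn_type;
}
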
+#define TOE_MAX_RAMROD_PER_PF 8
+#define TOE_TX_PAGE_SIZE_BYTES 4096
+#define TOE_GRQ_PAGE_SIZE_BYTES 4096
+#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096
+
+#define TOE_RX_MAX_RSS_CHAINS 64
+#define TOE_TX_MAX_TSS_CHAINS 64
+#define TOE_RSS_INDIRECTION_TABLE_SIZE 128
+
+/* The toe storm context of Mstorm */
+struct mstorm_toe_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The toe storm context of Pstorm */
+struct pstorm_toe_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+/* The toe storm context of Ystorm */
+struct ystorm_toe_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The toe storm context of Xstorm */
+struct xstorm_toe_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct ystorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7
+ u8 completion_opcode;
+ u8 byte3;
+ __le16 word0;
+ __le32 rel_seq;
+ __le32 rel_seq_threshold;
+ __le16 app_prod;
+ __le16 app_cons;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct xstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7
+ u8 flags2;
+#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 word2;
+ __le16 word3;
+ __le16 bd_prod;
+ __le16 word5;
+ __le16 word6;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 local_adv_wnd_seq;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_toe_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The toe storm context of Tstorm */
+struct tstorm_toe_conn_st_ctx {
+ __le32 reserved[16];
+};
+
+/* The toe storm context of Ustorm */
+struct ustorm_toe_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+/* toe connection context */
+struct toe_conn_context {
+ struct ystorm_toe_conn_st_ctx ystorm_st_context;
+ struct pstorm_toe_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_toe_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct ystorm_toe_conn_ag_ctx ystorm_ag_context;
+ struct xstorm_toe_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_toe_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_toe_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_toe_conn_st_ctx tstorm_st_context;
+ struct mstorm_toe_conn_st_ctx mstorm_st_context;
+ struct ustorm_toe_conn_st_ctx ustorm_st_context;
+};
+
+/* toe init ramrod header */
+struct toe_init_ramrod_header {
+ u8 first_rss;
+ u8 num_rss;
+ u8 reserved[6];
+};
+
+/* toe pf init parameters */
+struct toe_pf_init_params {
+ __le32 push_timeout;
+ __le16 grq_buffer_size;
+ __le16 grq_sb_id;
+ u8 grq_sb_index;
+ u8 max_seg_retransmit;
+ u8 doubt_reachability;
+ u8 ll2_rx_queue_id;
+ __le16 grq_fetch_threshold;
+ u8 reserved1[2];
+ struct regpair grq_page_addr;
+};
+
+/* toe tss parameters */
+struct toe_tss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe rss parameters */
+struct toe_rss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe init ramrod data */
+struct toe_init_ramrod_data {
+ struct toe_init_ramrod_header hdr;
+ struct tcp_init_params tcp_params;
+ struct toe_pf_init_params pf_params;
+ struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS];
+ struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS];
+};
+
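Illustrative sketch (not part of the patch): how a function-init request
might fill this structure. The queue count and page address are
placeholders, and the regpair hi/lo split assumes the little-endian DMA
address convention used throughout qed.

static void toe_init_example(struct toe_init_ramrod_data *p_ramrod,
			     dma_addr_t grq_page, u8 num_rss)
{
	p_ramrod->hdr.first_rss = 0;
	p_ramrod->hdr.num_rss = num_rss;

	/* placeholder choice: one GRQ buffer per 4 KB page */
	p_ramrod->pf_params.grq_buffer_size =
		cpu_to_le16(TOE_GRQ_PAGE_SIZE_BYTES);
	p_ramrod->pf_params.grq_page_addr.hi =
		cpu_to_le32(upper_32_bits(grq_page));
	p_ramrod->pf_params.grq_page_addr.lo =
		cpu_to_le32(lower_32_bits(grq_page));
}
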
+/* toe offload parameters */
+struct toe_offload_params {
+ struct regpair tx_bd_page_addr;
+ struct regpair tx_app_page_addr;
+ __le32 more_to_send_seq;
+ __le16 rcv_indication_size;
+ u8 rss_tss_id;
+ u8 ignore_grq_push;
+ struct regpair rx_db_data_ptr;
+};
+
+/* TOE offload ramrod data - DMAed by firmware */
+struct toe_offload_ramrod_data {
+ struct tcp_offload_params tcp_ofld_params;
+ struct toe_offload_params toe_ofld_params;
+};
+
+/* TOE ramrod command IDs */
+enum toe_ramrod_cmd_id {
+ TOE_RAMROD_UNUSED,
+ TOE_RAMROD_FUNC_INIT,
+ TOE_RAMROD_INITATE_OFFLOAD,
+ TOE_RAMROD_FUNC_CLOSE,
+ TOE_RAMROD_SEARCHER_DELETE,
+ TOE_RAMROD_TERMINATE,
+ TOE_RAMROD_QUERY,
+ TOE_RAMROD_UPDATE,
+ TOE_RAMROD_EMPTY,
+ TOE_RAMROD_RESET_SEND,
+ TOE_RAMROD_INVALIDATE,
+ MAX_TOE_RAMROD_CMD_ID
+};
+
+/* Toe RQ buffer descriptor */
+struct toe_rx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_RX_BD_START_MASK 0x1
+#define TOE_RX_BD_START_SHIFT 0
+#define TOE_RX_BD_END_MASK 0x1
+#define TOE_RX_BD_END_SHIFT 1
+#define TOE_RX_BD_NO_PUSH_MASK 0x1
+#define TOE_RX_BD_NO_PUSH_SHIFT 2
+#define TOE_RX_BD_SPLIT_MASK 0x1
+#define TOE_RX_BD_SPLIT_SHIFT 3
+#define TOE_RX_BD_RESERVED0_MASK 0xFFF
+#define TOE_RX_BD_RESERVED0_SHIFT 4
+ __le32 reserved1;
+};
+
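Illustrative sketch (not part of the patch): filling a receive BD. The
flags word packs START/END/NO_PUSH/SPLIT through the pairs above; the bit
manipulation happens on a CPU-order copy before the little-endian store,
with SET_FIELD as in the first note and the kernel's byte-order helpers
assumed.

static void toe_rx_bd_example(struct toe_rx_bd *bd, dma_addr_t buf,
			      u16 size, bool first, bool last)
{
	u16 flags = 0;

	bd->addr.hi = cpu_to_le32(upper_32_bits(buf));
	bd->addr.lo = cpu_to_le32(lower_32_bits(buf));
	bd->size = cpu_to_le16(size);

	SET_FIELD(flags, TOE_RX_BD_START, first ? 1 : 0);
	SET_FIELD(flags, TOE_RX_BD_END, last ? 1 : 0);
	bd->flags = cpu_to_le16(flags);
}
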
+/* TOE RX completion queue opcodes (opcode 0 is illegal) */
+enum toe_rx_cmp_opcode {
+ TOE_RX_CMP_OPCODE_GA = 1,
+ TOE_RX_CMP_OPCODE_GR = 2,
+ TOE_RX_CMP_OPCODE_GNI = 3,
+ TOE_RX_CMP_OPCODE_GAIR = 4,
+ TOE_RX_CMP_OPCODE_GAIL = 5,
+ TOE_RX_CMP_OPCODE_GRI = 6,
+ TOE_RX_CMP_OPCODE_GJ = 7,
+ TOE_RX_CMP_OPCODE_DGI = 8,
+ TOE_RX_CMP_OPCODE_CMP = 9,
+ TOE_RX_CMP_OPCODE_REL = 10,
+ TOE_RX_CMP_OPCODE_SKP = 11,
+ TOE_RX_CMP_OPCODE_URG = 12,
+ TOE_RX_CMP_OPCODE_RT_TO = 13,
+ TOE_RX_CMP_OPCODE_KA_TO = 14,
+ TOE_RX_CMP_OPCODE_MAX_RT = 15,
+ TOE_RX_CMP_OPCODE_DBT_RE = 16,
+ TOE_RX_CMP_OPCODE_SYN = 17,
+ TOE_RX_CMP_OPCODE_OPT_ERR = 18,
+ TOE_RX_CMP_OPCODE_FW2_TO = 19,
+ TOE_RX_CMP_OPCODE_2WY_CLS = 20,
+ TOE_RX_CMP_OPCODE_RST_RCV = 21,
+ TOE_RX_CMP_OPCODE_FIN_RCV = 22,
+ TOE_RX_CMP_OPCODE_FIN_UPL = 23,
+ TOE_RX_CMP_OPCODE_INIT = 32,
+ TOE_RX_CMP_OPCODE_RSS_UPDATE = 33,
+ TOE_RX_CMP_OPCODE_CLOSE = 34,
+ TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80,
+ TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81,
+ TOE_RX_CMP_OPCODE_TERMINATE = 82,
+ TOE_RX_CMP_OPCODE_QUERY = 83,
+ TOE_RX_CMP_OPCODE_RESET_SEND = 84,
+ TOE_RX_CMP_OPCODE_INVALIDATE = 85,
+ TOE_RX_CMP_OPCODE_EMPTY = 86,
+ TOE_RX_CMP_OPCODE_UPDATE = 87,
+ MAX_TOE_RX_CMP_OPCODE
+};
+
+/* TOE rx ooo completion data */
+struct toe_rx_cqe_ooo_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ u8 isle_num;
+ u8 reserved0;
+};
+
+/* TOE rx in order completion data */
+struct toe_rx_cqe_in_order_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ __le16 reserved1;
+};
+
+/* Union for TOE rx completion data */
+union toe_rx_cqe_data_union {
+ struct toe_rx_cqe_ooo_params ooo_params;
+ struct toe_rx_cqe_in_order_params in_order_params;
+ struct regpair raw_data;
+};
+
+/* TOE rx completion element */
+struct toe_rx_cqe {
+ __le16 icid;
+ u8 completion_opcode;
+ u8 reserved0;
+ __le32 reserved1;
+ union toe_rx_cqe_data_union data;
+};
+
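Illustrative sketch (not part of the patch): dispatching an RX completion.
Which union member applies per opcode is an assumption here (GA is read as
an in-order placement completion; the exact semantics are firmware-defined).

static void toe_rx_cqe_example(const struct toe_rx_cqe *cqe)
{
	switch (cqe->completion_opcode) {
	case TOE_RX_CMP_OPCODE_GA:
		pr_debug("icid %u: %u bytes in GRQ buffer %u\n",
			 le16_to_cpu(cqe->icid),
			 le32_to_cpu(cqe->data.in_order_params.nbytes),
			 le16_to_cpu(cqe->data.in_order_params.grq_buff_id));
		break;
	default:
		break;
	}
}
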
+/* toe RX doorbell data */
+struct toe_rx_db_data {
+ __le32 local_adv_wnd_seq;
+ __le32 reserved[3];
+};
+
+/* Toe GRQ buffer descriptor */
+struct toe_rx_grq_bd {
+ struct regpair addr;
+ __le16 buff_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/* Toe transmission application buffer descriptor */
+struct toe_tx_app_buff_desc {
+ __le32 next_buffer_start_seq;
+ __le32 reserved;
+};
+
+/* Toe transmission application buffer descriptor page pointer */
+struct toe_tx_app_buff_page_pointer {
+ struct regpair next_page_addr;
+};
+
+/* Toe transmission buffer descriptor */
+struct toe_tx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_TX_BD_PUSH_MASK 0x1
+#define TOE_TX_BD_PUSH_SHIFT 0
+#define TOE_TX_BD_NOTIFY_MASK 0x1
+#define TOE_TX_BD_NOTIFY_SHIFT 1
+#define TOE_TX_BD_LARGE_IO_MASK 0x1
+#define TOE_TX_BD_LARGE_IO_SHIFT 2
+#define TOE_TX_BD_BD_CONS_MASK 0x1FFF
+#define TOE_TX_BD_BD_CONS_SHIFT 3
+ __le32 next_bd_start_seq;
+};
+
+/* TOE TX completion opcodes */
+enum toe_tx_cmp_opcode {
+ TOE_TX_CMP_OPCODE_DATA,
+ TOE_TX_CMP_OPCODE_TERMINATE,
+ TOE_TX_CMP_OPCODE_EMPTY,
+ TOE_TX_CMP_OPCODE_RESET_SEND,
+ TOE_TX_CMP_OPCODE_INVALIDATE,
+ TOE_TX_CMP_OPCODE_RST_RCV,
+ MAX_TOE_TX_CMP_OPCODE
+};
+
+/* Toe transmission completion element */
+struct toe_tx_cqe {
+ __le16 icid;
+ u8 opcode;
+ u8 reserved;
+ __le32 size;
+};
+
+/* Toe transmission page pointer bd */
+struct toe_tx_page_pointer_bd {
+ struct regpair next_page_addr;
+ struct regpair prev_page_addr;
+};
+
+/* Toe transmission completion element page pointer */
+struct toe_tx_page_pointer_cqe {
+ struct regpair next_page_addr;
+};
+
+/* toe update parameters */
+struct toe_update_params {
+ __le16 flags;
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0
+#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF
+#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1
+ __le16 rcv_indication_size;
+ __le16 reserved1[2];
+};
+
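Illustrative sketch (not part of the patch): the CHANGED flag presumably
gates whether firmware applies rcv_indication_size, so a caller sets the
two together; SET_FIELD as in the first note, on a CPU-order copy of the
little-endian flags word.

static void toe_update_example(struct toe_update_params *p, u16 ind_size)
{
	u16 flags = le16_to_cpu(p->flags);

	p->rcv_indication_size = cpu_to_le16(ind_size);
	SET_FIELD(flags, TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED, 1);
	p->flags = cpu_to_le16(flags);
}
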
+/* TOE update ramrod data - DMAed by firmware */
+struct toe_update_ramrod_data {
+ struct tcp_update_params tcp_upd_params;
+ struct toe_update_params toe_upd_params;
+};
+
+struct mstorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* TOE doorbell data */
+struct toe_db_data {
+ u8 params;
+#define TOE_DB_DATA_DEST_MASK 0x3
+#define TOE_DB_DATA_DEST_SHIFT 0
+#define TOE_DB_DATA_AGG_CMD_MASK 0x3
+#define TOE_DB_DATA_AGG_CMD_SHIFT 2
+#define TOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define TOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define TOE_DB_DATA_RESERVED_MASK 0x1
+#define TOE_DB_DATA_RESERVED_SHIFT 5
+#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
};
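Illustrative sketch (not part of the patch): composing the TX doorbell
payload. DB_DEST_XCM and DB_AGG_CMD_SET are assumed here to be the generic
doorbell encodings from the same HSI family; SET_FIELD as in the first note.

static void toe_doorbell_example(struct toe_db_data *db, u16 bd_prod)
{
	db->params = 0;
	SET_FIELD(db->params, TOE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db->params, TOE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	db->agg_flags = 0;
	db->bd_prod = cpu_to_le16(bd_prod);
}
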
/* rdma function init ramrod data */
@@ -6911,6 +6150,8 @@ enum rdma_event_opcode {
RDMA_EVENT_CREATE_SRQ,
RDMA_EVENT_MODIFY_SRQ,
RDMA_EVENT_DESTROY_SRQ,
+ RDMA_EVENT_START_NAMESPACE_TRACKING,
+ RDMA_EVENT_STOP_NAMESPACE_TRACKING,
MAX_RDMA_EVENT_OPCODE
};
@@ -6935,18 +6176,33 @@ struct rdma_init_func_hdr {
u8 relaxed_ordering;
__le16 first_reg_srq_id;
__le32 reg_srq_base_addr;
- u8 searcher_mode;
- u8 pvrdma_mode;
+ u8 flags;
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2
+#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F
+#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3
+ u8 dpt_byte_threshold_log;
+ u8 dpt_common_queue_id;
u8 max_num_ns_log;
- u8 reserved;
};
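
Illustrative sketch (not part of the patch): the former searcher_mode and
pvrdma_mode bytes are folded into the new flags bitfield; with SET_FIELD
as in the first note, a caller now writes them as:

static void rdma_init_func_hdr_example(struct rdma_init_func_hdr *hdr,
				       bool searcher, bool pvrdma)
{
	hdr->flags = 0;
	SET_FIELD(hdr->flags, RDMA_INIT_FUNC_HDR_SEARCHER_MODE,
		  searcher ? 1 : 0);
	SET_FIELD(hdr->flags, RDMA_INIT_FUNC_HDR_PVRDMA_MODE,
		  pvrdma ? 1 : 0);
}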
/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params dptq_params;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
+/* rdma namespace tracking ramrod data */
+struct rdma_namespace_tracking_ramrod_data {
+ u8 name_space;
+ u8 reserved[7];
+};
+
/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
@@ -6960,6 +6216,8 @@ enum rdma_ramrod_cmd_id {
RDMA_RAMROD_CREATE_SRQ,
RDMA_RAMROD_MODIFY_SRQ,
RDMA_RAMROD_DESTROY_SRQ,
+ RDMA_RAMROD_START_NS_TRACKING,
+ RDMA_RAMROD_STOP_NS_TRACKING,
MAX_RDMA_RAMROD_CMD_ID
};
@@ -7093,73 +6351,73 @@ struct rdma_xrc_srq_context {
struct regpair temp[9];
};
-struct e4_tstorm_rdma_task_ag_ctx {
+struct tstorm_rdma_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -7172,63 +6430,63 @@ struct e4_tstorm_rdma_task_ag_ctx {
__le32 reg2;
};
-struct e4_ustorm_rdma_conn_ag_ctx {
+struct ustorm_rdma_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 nvmf_only;
__le16 conn_dpi;
@@ -7241,214 +6499,214 @@ struct e4_ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_roce_conn_ag_ctx {
+struct xstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7470,89 +6728,89 @@ struct e4_xstorm_roce_conn_ag_ctx {
__le32 reg6;
};
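
Every _MASK/_SHIFT pair renamed above packs a sub-byte field into one of the flagsN bytes. The driver does not open-code the shifts; it goes through the generic GET_FIELD()/SET_FIELD() helpers from include/linux/qed/common_hsi.h, which token-paste _MASK and _SHIFT onto the field name. A minimal self-contained sketch of that access pattern, using local stand-ins for the helpers and two of the renamed xstorm fields (the values written are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the qed helpers in include/linux/qed/common_hsi.h;
 * the name##_MASK / name##_SHIFT pasting is exactly what the #defines in
 * this header are written for. */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))
#define SET_FIELD(value, name, flag)						\
	do {									\
		(value) &= ~((name##_MASK) << (name##_SHIFT));			\
		(value) |= (((uint64_t)(flag) & (name##_MASK)) << (name##_SHIFT)); \
	} while (0)

/* Two fields of xstorm_roce_conn_ag_ctx.flags7, copied from above */
#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK	0x3
#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT	4
#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK	0x1
#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT	6

int main(void)
{
	uint8_t flags7 = 0;

	SET_FIELD(flags7, XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH, 2);
	SET_FIELD(flags7, XSTORM_ROCE_CONN_AG_CTX_CF0EN, 1);
	printf("flags7=0x%02x slow_path=%u cf0en=%u\n", flags7,
	       (unsigned int)GET_FIELD(flags7, XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH),
	       (unsigned int)GET_FIELD(flags7, XSTORM_ROCE_CONN_AG_CTX_CF0EN));
	return 0;
}

Because the helpers paste the prefix onto _MASK/_SHIFT, dropping E4_ from the #define names also means renaming every GET_FIELD()/SET_FIELD() call site that names these fields, which is why this mechanical diff is so large.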
-struct e4_tstorm_roce_conn_ag_ctx {
+struct tstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -7605,15 +6863,15 @@ struct ustorm_roce_conn_st_ctx {
};
/* roce connection context */
-struct e4_roce_conn_context {
+struct roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_roce_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_roce_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_roce_conn_st_ctx mstorm_st_context;
@@ -7681,8 +6939,10 @@ struct roce_create_qp_req_ramrod_data {
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3
u8 name_space;
u8 reserved3[3];
__le16 regular_latency_phy_queue;
@@ -7714,8 +6974,10 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19
__le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
@@ -7752,10 +7014,85 @@ struct roce_create_qp_resp_ramrod_data {
u8 reserved3[3];
};
+/* RoCE Create Suspended QP requester runtime ramrod data */
+struct roce_create_suspended_qp_req_runtime_ramrod_data {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \
+ 0x7FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+};
+
+/* RoCE Create Suspended QP requester ramrod data */
+struct roce_create_suspended_qp_req_ramrod_data {
+ struct roce_create_qp_req_ramrod_data qp_params;
+ struct roce_create_suspended_qp_req_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE Create Suspended QP responder runtime params */
+struct roce_create_suspended_qp_resp_runtime_params {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+ __le32 resreved;
+};
+
+/* RoCE RDB array entry */
+struct roce_resp_qp_rdb_entry {
+ struct regpair atomic_data;
+ struct regpair va;
+ __le32 psn;
+ __le32 rkey;
+ __le32 byte_count;
+ u8 op_type;
+ u8 reserved[3];
+};
+
+/* RoCE Create Suspended QP responder runtime ramrod data */
+struct roce_create_suspended_qp_resp_runtime_ramrod_data {
+ struct roce_create_suspended_qp_resp_runtime_params params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Create Suspended QP responder ramrod data */
+struct roce_create_suspended_qp_resp_ramrod_data {
+ struct roce_create_qp_resp_ramrod_data
+ qp_params;
+ struct roce_create_suspended_qp_resp_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE create ud qp ramrod data */
+struct roce_create_ud_qp_ramrod_data {
+ __le16 local_mac_addr[3];
+ __le16 vlan_id;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved[3];
+};
+
/* roce DCQCN received statistics */
struct roce_dcqcn_received_stats {
struct regpair ecn_pkt_rcv;
struct regpair cnp_pkt_rcv;
+ struct regpair cnp_pkt_reject;
};
/* roce DCQCN sent statistics */
@@ -7787,6 +7124,12 @@ struct roce_destroy_qp_resp_ramrod_data {
__le32 reserved;
};
+/* RoCE destroy ud qp ramrod data */
+struct roce_destroy_ud_qp_ramrod_data {
+ __le32 src_qp_id;
+ __le32 reserved;
+};
+
/* roce error statistics */
struct roce_error_stats {
__le32 resp_remote_access_errors;
@@ -7809,13 +7152,21 @@ struct roce_events_stats {
/* roce slow path EQ cmd IDs */
enum roce_event_opcode {
- ROCE_EVENT_CREATE_QP = 11,
+ ROCE_EVENT_CREATE_QP = 13,
ROCE_EVENT_MODIFY_QP,
ROCE_EVENT_QUERY_QP,
ROCE_EVENT_DESTROY_QP,
ROCE_EVENT_CREATE_UD_QP,
ROCE_EVENT_DESTROY_UD_QP,
ROCE_EVENT_FUNC_UPDATE,
+ ROCE_EVENT_SUSPEND_QP,
+ ROCE_EVENT_QUERY_SUSPENDED_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_QP,
+ ROCE_EVENT_RESUME_QP,
+ ROCE_EVENT_SUSPEND_UD_QP,
+ ROCE_EVENT_RESUME_UD_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_UD_QP,
+ ROCE_EVENT_FLUSH_DPT_QP,
MAX_ROCE_EVENT_OPCODE
};
@@ -7843,6 +7194,18 @@ struct roce_init_func_ramrod_data {
struct roce_init_func_params roce;
};
+/* roce_ll2_cqe_data */
+struct roce_ll2_cqe_data {
+ u8 name_space;
+ u8 flags;
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0
+#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F
+#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1
+ u8 reserved1[2];
+ __le32 cid;
+};
+
/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
@@ -7870,8 +7233,10 @@ struct roce_modify_qp_req_ramrod_data {
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15
u8 fields;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
@@ -7917,8 +7282,10 @@ struct roce_modify_qp_resp_ramrod_data {
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12
u8 fields;
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
@@ -7969,18 +7336,84 @@ struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE Query Suspended QP requester output params */
+struct roce_query_suspended_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+ __le32 reserved;
+};
+
+/* RoCE Query Suspended QP requester ramrod data */
+struct roce_query_suspended_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE Query Suspended QP responder runtime params */
+struct roce_query_suspended_qp_resp_runtime_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+};
+
+/* RoCE Query Suspended QP responder output params */
+struct roce_query_suspended_qp_resp_output_params {
+ struct roce_query_suspended_qp_resp_runtime_params runtime_params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Query Suspended QP responder ramrod data */
+struct roce_query_suspended_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {
- ROCE_RAMROD_CREATE_QP = 11,
+ ROCE_RAMROD_CREATE_QP = 13,
ROCE_RAMROD_MODIFY_QP,
ROCE_RAMROD_QUERY_QP,
ROCE_RAMROD_DESTROY_QP,
ROCE_RAMROD_CREATE_UD_QP,
ROCE_RAMROD_DESTROY_UD_QP,
ROCE_RAMROD_FUNC_UPDATE,
+ ROCE_RAMROD_SUSPEND_QP,
+ ROCE_RAMROD_QUERY_SUSPENDED_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_QP,
+ ROCE_RAMROD_RESUME_QP,
+ ROCE_RAMROD_SUSPEND_UD_QP,
+ ROCE_RAMROD_RESUME_UD_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_UD_QP,
+ ROCE_RAMROD_FLUSH_DPT_QP,
MAX_ROCE_RAMROD_CMD_ID
};
+/* RoCE RDB array entry type */
+enum roce_resp_qp_rdb_entry_type {
+ ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0,
+ ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1,
+ ROCE_QP_RDB_ENTRY_INVALID = 2,
+ MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE
+};
+
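The responder's suspended-QP state declared above pairs a runtime-params block (whose num_rdb_entries gives the live count) with an rdb_array_entries[] table of roce_resp_qp_rdb_entry slots, each tagged by an op_type drawn from this enum. A hedged sketch of a consumer walking such a table — host-order stand-in types instead of __le32/regpair, and count_live_rdb is a hypothetical helper, not a driver function:

#include <stdint.h>

/* Host-order mirror of roce_resp_qp_rdb_entry, for illustration only */
struct rdb_entry {
	uint64_t atomic_data;
	uint64_t va;
	uint32_t psn;
	uint32_t rkey;
	uint32_t byte_count;
	uint8_t op_type;	/* enum roce_resp_qp_rdb_entry_type */
	uint8_t reserved[3];
};

enum rdb_entry_type {
	RDB_ENTRY_RDMA_RESPONSE = 0,
	RDB_ENTRY_ATOMIC_RESPONSE = 1,
	RDB_ENTRY_INVALID = 2,
};

/* Count the entries still tracking an outstanding response; treating
 * the INVALID type as an unused slot is an assumption here, not
 * something the firmware interface itself spells out. */
static unsigned int count_live_rdb(const struct rdb_entry *tbl, uint32_t n)
{
	unsigned int live = 0;
	uint32_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].op_type != RDB_ENTRY_INVALID)
			live++;
	return live;
}
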
/* RoCE func init ramrod data */
struct roce_update_func_params {
u8 cnp_vlan_priority;
@@ -7995,7 +7428,7 @@ struct roce_update_func_params {
__le32 cnp_send_timeout;
};
-struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
u8 flags0;
@@ -8222,200 +7655,200 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4;
};
-struct e4_mstorm_roce_conn_ag_ctx {
+struct mstorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_mstorm_roce_req_conn_ag_ctx {
+struct mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_mstorm_roce_resp_conn_ag_ctx {
+struct mstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_tstorm_roce_req_conn_ag_ctx {
+struct tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 dif_rxmit_cnt;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
@@ -8437,89 +7870,89 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
__le32 reg10;
};
-struct e4_tstorm_roce_resp_conn_ag_ctx {
+struct tstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 psn_and_rxmit_id_echo;
__le32 reg1;
__le32 reg2;
@@ -8541,63 +7974,63 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_roce_req_conn_ag_ctx {
+struct ustorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8610,63 +8043,63 @@ struct e4_ustorm_roce_req_conn_ag_ctx {
__le16 word3;
};
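/*
 * Illustrative aside, not part of this commit: each *_MASK/*_SHIFT pair
 * above describes a sub-field packed into one of the flagsN bytes. Driver
 * code accesses such fields through the GET_FIELD()/SET_FIELD() helpers in
 * the qed HSI headers, which token-paste the _MASK/_SHIFT suffixes onto the
 * field name — so this rename only changes the identifier spelling, never
 * the bit layout. A minimal sketch (the function name and chosen field are
 * hypothetical):
 */
static void example_set_rule0en(struct ustorm_roce_req_conn_ag_ctx *ctx)
{
	/* RULE0EN occupies bit 7 of flags2 (mask 0x1, shift 7). */
	SET_FIELD(ctx->flags2, USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN, 1);

	/* Read-back pastes the same suffixes onto the same name. */
	WARN_ON(!GET_FIELD(ctx->flags2, USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN));
}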
-struct e4_ustorm_roce_resp_conn_ag_ctx {
+struct ustorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8679,214 +8112,214 @@ struct e4_ustorm_roce_resp_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_roce_req_conn_ag_ctx {
+struct xstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -8908,216 +8341,216 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
__le32 orq_cons;
};
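/*
 * Illustrative aside, not part of this commit: the two-bit CF fields
 * (mask 0x3) hold values 0-3, while the matching one-bit *_EN flags gate
 * them. Arming the FLUSH_Q0 flag on the requester context might look like
 * the sketch below (the helper name and the value written are illustrative):
 */
static void example_arm_flush_q0(struct xstorm_roce_req_conn_ag_ctx *ctx)
{
	/* FLUSH_Q0_CF sits in flags3 (mask 0x3, shift 6)... */
	SET_FIELD(ctx->flags3, XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF, 1);
	/* ...and its enable bit in flags8 (mask 0x1, shift 5). */
	SET_FIELD(ctx->flags8, XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN, 1);
}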
-struct e4_xstorm_roce_resp_conn_ag_ctx {
+struct xstorm_roce_resp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 irq_prod_shadow;
@@ -9139,37 +8572,37 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
-struct e4_ystorm_roce_conn_ag_ctx {
+struct ystorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9183,37 +8616,37 @@ struct e4_ystorm_roce_conn_ag_ctx {
__le32 reg3;
};
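/*
 * Illustrative aside, not part of this commit: the generic ystorm context
 * above and the req/resp variants that follow appear to share one layout
 * (two bytes, two flags bytes, byte2/byte3, then words and regs); only the
 * macro prefixes differ. If that holds, it can be pinned down at build
 * time, e.g.:
 */
static_assert(sizeof(struct ystorm_roce_req_conn_ag_ctx) ==
	      sizeof(struct ystorm_roce_conn_ag_ctx));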
-struct e4_ystorm_roce_req_conn_ag_ctx {
+struct ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9227,37 +8660,37 @@ struct e4_ystorm_roce_req_conn_ag_ctx {
__le32 reg3;
};
-struct e4_ystorm_roce_resp_conn_ag_ctx {
+struct ystorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9294,216 +8727,216 @@ struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[48];
};
-struct e4_xstorm_iwarp_conn_ag_ctx {
+struct xstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -9551,89 +8984,89 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
__le32 reg17;
};
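/*
 * Editor's sketch, not part of the patch: unlike the 1-bit enable/rule
 * flags, the CF ("completion flag") fields above are 2 bits wide (mask
 * 0x3), so they carry a small state value rather than a bool. The same
 * SET_FIELD() helper handles both widths; illustrative function name:
 */
static void xstorm_cf_sketch(void)
{
	u8 flags14 = 0;

	/* occupies bits 7:6 of flags14 (shift 6, mask 0x3) */
	SET_FIELD(flags14, XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF, 2);
}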
-struct e4_tstorm_iwarp_conn_ag_ctx {
+struct tstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 unaligned_nxt_seq;
@@ -9671,16 +9104,16 @@ struct ustorm_iwarp_conn_st_ctx {
};
/* iwarp connection context */
-struct e4_iwarp_conn_context {
+struct iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
@@ -9731,8 +9164,8 @@ enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
- IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
MAX_IWARP_EQE_ASYNC_OPCODE
};
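/*
 * Editor's note, not part of the patch: swapping the SRQ_LIMIT/SRQ_EMPTY
 * lines above is more than cosmetic -- C enumerators are numbered by
 * position, so the two async opcodes exchange their numeric values and
 * the driver must match the firmware it is built against:
 */
enum demo_opcode { DEMO_OP_A, DEMO_OP_B };	/* DEMO_OP_A == 0, DEMO_OP_B == 1; swap the lines and the values swap too */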
@@ -9750,8 +9183,7 @@ struct iwarp_eqe_data_tcp_async_completion {
/* iWARP completion queue types */
enum iwarp_eqe_sync_opcode {
- IWARP_EVENT_TYPE_TCP_OFFLOAD =
- 11,
+ IWARP_EVENT_TYPE_TCP_OFFLOAD = 13,
IWARP_EVENT_TYPE_MPA_OFFLOAD,
IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
IWARP_EVENT_TYPE_CREATE_QP,
@@ -9783,8 +9215,6 @@ enum iwarp_fw_return_code {
IWARP_EXCEPTION_DETECTED_LLP_RESET,
IWARP_EXCEPTION_DETECTED_IRQ_FULL,
IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_LIMIT,
IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
@@ -9878,9 +9308,10 @@ struct iwarp_mpa_offload_ramrod_data {
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
struct regpair shared_queue_addr;
+ __le32 additional_setup_time;
__le16 rcv_wnd;
u8 stats_counter_id;
- u8 reserved3[13];
+ u8 reserved3[9];
};
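/*
 * Editor's note, not part of the patch: the layout change is size-neutral.
 * The new __le32 additional_setup_time adds 4 bytes while reserved3
 * shrinks from 13 to 9 bytes (2 + 1 + 13 == 4 + 2 + 1 + 9 == 16), so the
 * ramrod keeps its overall size; only the fields after the insertion
 * point move down by 4 bytes.
 */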
/* iWARP TCP connection offload params passed by driver to FW */
@@ -9888,11 +9319,13 @@ struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
+ __le32 additional_setup_time;
__le16 physical_q0;
__le16 physical_q1;
u8 stats_counter_id;
u8 mpa_mode;
- u8 reserved[10];
+ u8 src_vport_id;
+ u8 reserved[5];
};
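/*
 * Editor's note, not part of the patch: the same balancing act applies
 * here -- 4 bytes of additional_setup_time plus 1 byte of src_vport_id
 * come out of the reserved pad (10 - 5 == 5 bytes), leaving the overall
 * structure size unchanged.
 */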
/* iWARP query QP output params */
@@ -9912,7 +9345,7 @@ struct iwarp_query_qp_ramrod_data {
/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {
- IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9971,100 +9404,100 @@ struct unaligned_opaque_data {
__le32 cid;
};
-struct e4_mstorm_iwarp_conn_ag_ctx {
+struct mstorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 rcq_cons;
__le16 rcq_cons_th;
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_iwarp_conn_ag_ctx {
+struct ustorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10077,37 +9510,37 @@ struct e4_ustorm_iwarp_conn_ag_ctx {
__le16 word3;
};
-struct e4_ystorm_iwarp_conn_ag_ctx {
+struct ystorm_iwarp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10297,216 +9730,216 @@ struct xstorm_fcoe_conn_st_ctx {
struct fcoe_wqe cached_wqes[16];
};
-struct e4_xstorm_fcoe_conn_ag_ctx {
+struct xstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
u8 flags2;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
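The mask/shift pairs above tile each flags byte exactly: in flags4, for example, the four 2-bit fields CF8..CF11 sit at shifts 0, 2, 4 and 6, so the last field ends at bit 8, the full width of the u8 that holds them. A minimal compile-time sketch of that invariant (illustrative only, not part of the patch):

	/* CF11 is the last 2-bit field in flags4; it must end at bit 8. */
	_Static_assert(XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT + 2 == 8,
		       "flags4 is fully packed");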
@@ -10544,150 +9977,150 @@ struct ustorm_fcoe_conn_st_ctx {
u8 reserved[2];
};
-struct e4_tstorm_fcoe_conn_ag_ctx {
+struct tstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_fcoe_conn_ag_ctx {
+struct ustorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10728,37 +10161,37 @@ struct tstorm_fcoe_conn_st_ctx {
u8 reserved0[4];
};
-struct e4_mstorm_fcoe_conn_ag_ctx {
+struct mstorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -10804,21 +10237,21 @@ struct mstorm_fcoe_conn_st_ctx {
};
/* fcoe connection context */
-struct e4_fcoe_conn_context {
+struct fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
struct regpair xstorm_ag_padding[6];
struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
- struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
};
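For orientation, the renamed *_MASK/*_SHIFT pairs are consumed through token-pasting accessors; the qed driver keeps GET_FIELD/SET_FIELD helpers of roughly this shape (the exact in-tree definitions may differ, and the example function below is hypothetical, assuming the kernel u8 type and the struct definitions from this header):

	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & name##_MASK)

	#define SET_FIELD(value, name, flag) \
		do { \
			(value) &= ~((name##_MASK) << (name##_SHIFT)); \
			(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
		} while (0)

	/* Hypothetical usage against the struct renamed above. */
	static void fcoe_ctx_example(struct xstorm_fcoe_conn_ag_ctx *ctx)
	{
		/* Read the 2-bit FLUSH_Q0 completion flag from flags7. */
		u8 flush_q0 = GET_FIELD(ctx->flags7,
					XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0);

		/* Turn on the 1-bit CF0EN enable in the same byte. */
		SET_FIELD(ctx->flags7, XSTORM_FCOE_CONN_AG_CTX_CF0EN, 1);
		(void)flush_q0;
	}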
@@ -10869,37 +10302,37 @@ struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
-struct e4_ystorm_fcoe_conn_ag_ctx {
+struct ystorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10930,216 +10363,216 @@ struct xstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_iscsi[44];
};
-struct e4_xstorm_iscsi_conn_ag_ctx {
+struct xstorm_iscsi_conn_ag_ctx {
u8 cdu_validation;
u8 state;
u8 flags0;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -11187,89 +10620,89 @@ struct e4_xstorm_iscsi_conn_ag_ctx {
__le32 reg17;
};
-struct e4_tstorm_iscsi_conn_ag_ctx {
+struct tstorm_iscsi_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 rx_tcp_checksum_err_cnt;
@@ -11284,63 +10717,63 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
__le16 word0;
};
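
These hunks drop the E4_ prefix mechanically; the _MASK/_SHIFT semantics are unchanged. For orientation, a minimal sketch of how one of these 2-bit CF fields is written into a flags byte. SET_FIELD_EX is an illustrative helper, not the driver's; note that in these context structs MASK is the unshifted field mask (0x3) and SHIFT is the bit position:

	#define SET_FIELD_EX(var, name, val)					\
		((var) = ((var) & ~(name##_MASK << name##_SHIFT)) |		\
			 (((val) & name##_MASK) << name##_SHIFT))

	static void arm_p2t_flush(struct tstorm_iscsi_conn_ag_ctx *ctx)
	{
		/* set the 2-bit P2T flush CF at bits 1:0 of flags1 */
		SET_FIELD_EX(ctx->flags1, TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF, 2);
	}
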
-struct e4_ustorm_iscsi_conn_ag_ctx {
+struct ustorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11358,37 +10791,37 @@ struct tstorm_iscsi_conn_st_ctx {
__le32 reserved[44];
};
-struct e4_mstorm_iscsi_conn_ag_ctx {
+struct mstorm_iscsi_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -11407,22 +10840,22 @@ struct ustorm_iscsi_conn_st_ctx {
};
/* iscsi connection context */
-struct e4_iscsi_conn_context {
+struct iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct pb_context xpb2_context;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
struct pb_context upb_context;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};
@@ -11433,37 +10866,37 @@ struct iscsi_init_ramrod_params {
struct tcp_init_params tcp_init;
};
-struct e4_ystorm_iscsi_conn_ag_ctx {
+struct ystorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11477,1922 +10910,4 @@ struct e4_ystorm_iscsi_conn_ag_ctx {
__le32 reg3;
};
-#define MFW_TRACE_SIGNATURE 0x25071946
-
-/* The trace in the buffer */
-#define MFW_TRACE_EVENTID_MASK 0x00ffff
-#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
-#define MFW_TRACE_PRM_SIZE_OFFSET 16
-#define MFW_TRACE_ENTRY_SIZE 3
-
-struct mcp_trace {
- u32 signature; /* Help to identify that the trace is valid */
- u32 size; /* the size of the trace buffer in bytes */
- u32 curr_level; /* 2 - all will be written to the buffer
- * 1 - debug trace will not be written
- * 0 - just errors will be written to the buffer
- */
- u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
- * mask it.
- */
-
- /* Warning: the following pointers are assumed to be 32bits as they are
- * used only in the MFW.
- */
- u32 trace_prod; /* The next trace will be written to this offset */
- u32 trace_oldest; /* The oldest valid trace starts at this offset
- * (usually very close after the current producer).
- */
-};
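
The producer/oldest offsets above describe a byte ring. A minimal consumer sketch, assuming (for illustration only) fixed MFW_TRACE_ENTRY_SIZE strides and ignoring the per-entry parameter bytes that the PRM_SIZE field encodes:

	static void walk_mcp_trace(const struct mcp_trace *t, const u8 *buf,
				   void (*emit)(const u8 *entry))
	{
		u32 off = t->trace_oldest;

		if (t->signature != MFW_TRACE_SIGNATURE)
			return;	/* trace buffer not valid */

		while (off != t->trace_prod) {
			emit(buf + off);
			off = (off + MFW_TRACE_ENTRY_SIZE) % t->size;
		}
	}
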
-
-#define VF_MAX_STATIC 192
-
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2
-#define MCP_GLOB_PORT_MAX 4
-#define MCP_GLOB_FUNC_MAX 16
-
-typedef u32 offsize_t; /* In DWORDS !!! */
-/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
-/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
-
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
- OFFSIZE_OFFSET_MASK) >> \
- OFFSIZE_OFFSET_SHIFT) << 2))
-
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
- OFFSIZE_SIZE_MASK) >> \
- OFFSIZE_SIZE_SHIFT) << 2)
-
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
- SECTION_OFFSET(_offsize) + \
- (QED_SECTION_SIZE(_offsize) * idx))
-
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
- (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
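
Since offsize_t counts in dwords, both halves are scaled by << 2 before use. A worked example of the decode the macros above perform, with a hypothetical offsize value:

	/* offsize = element size (dwords) in the high half, offset (dwords) low */
	u32 offsize = (0x40 << OFFSIZE_SIZE_SHIFT) | 0x100;

	u32 off_bytes  = SECTION_OFFSET(offsize);	/* 0x100 dwords -> 1024 bytes */
	u32 elem_bytes = QED_SECTION_SIZE(offsize);	/* 0x40 dwords  -> 256 bytes  */

	/* SECTION_ADDR(offsize, 3) = MCP_REG_SCRATCH + 1024 + 256 * 3 */
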
-
-/* PHY configuration */
-struct eth_phy_cfg {
- u32 speed;
-#define ETH_SPEED_AUTONEG 0x0
-#define ETH_SPEED_SMARTLINQ 0x8
-
- u32 pause;
-#define ETH_PAUSE_NONE 0x0
-#define ETH_PAUSE_AUTONEG 0x1
-#define ETH_PAUSE_RX 0x2
-#define ETH_PAUSE_TX 0x4
-
- u32 adv_speed;
-
- u32 loopback_mode;
-#define ETH_LOOPBACK_NONE 0x0
-#define ETH_LOOPBACK_INT_PHY 0x1
-#define ETH_LOOPBACK_EXT_PHY 0x2
-#define ETH_LOOPBACK_EXT 0x3
-#define ETH_LOOPBACK_MAC 0x4
-#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
-#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
-#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
-#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
-#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
-
- u32 eee_cfg;
-#define EEE_CFG_EEE_ENABLED BIT(0)
-#define EEE_CFG_TX_LPI BIT(1)
-#define EEE_CFG_ADV_SPEED_1G BIT(2)
-#define EEE_CFG_ADV_SPEED_10G BIT(3)
-#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
-#define EEE_TX_TIMER_USEC_OFFSET 4
-#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
-#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
-#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
-
- u32 deprecated;
-
- u32 fec_mode;
-#define FEC_FORCE_MODE_MASK 0x000000ff
-#define FEC_FORCE_MODE_OFFSET 0
-#define FEC_FORCE_MODE_NONE 0x00
-#define FEC_FORCE_MODE_FIRECODE 0x01
-#define FEC_FORCE_MODE_RS 0x02
-#define FEC_FORCE_MODE_AUTO 0x07
-#define FEC_EXTENDED_MODE_MASK 0xffffff00
-#define FEC_EXTENDED_MODE_OFFSET 8
-#define ETH_EXT_FEC_NONE 0x00000100
-#define ETH_EXT_FEC_10G_NONE 0x00000200
-#define ETH_EXT_FEC_10G_BASE_R 0x00000400
-#define ETH_EXT_FEC_20G_NONE 0x00000800
-#define ETH_EXT_FEC_20G_BASE_R 0x00001000
-#define ETH_EXT_FEC_25G_NONE 0x00002000
-#define ETH_EXT_FEC_25G_BASE_R 0x00004000
-#define ETH_EXT_FEC_25G_RS528 0x00008000
-#define ETH_EXT_FEC_40G_NONE 0x00010000
-#define ETH_EXT_FEC_40G_BASE_R 0x00020000
-#define ETH_EXT_FEC_50G_NONE 0x00040000
-#define ETH_EXT_FEC_50G_BASE_R 0x00080000
-#define ETH_EXT_FEC_50G_RS528 0x00100000
-#define ETH_EXT_FEC_50G_RS544 0x00200000
-#define ETH_EXT_FEC_100G_NONE 0x00400000
-#define ETH_EXT_FEC_100G_BASE_R 0x00800000
-#define ETH_EXT_FEC_100G_RS528 0x01000000
-#define ETH_EXT_FEC_100G_RS544 0x02000000
-
- u32 extended_speed;
-#define ETH_EXT_SPEED_MASK 0x0000ffff
-#define ETH_EXT_SPEED_OFFSET 0
-#define ETH_EXT_SPEED_AN 0x00000001
-#define ETH_EXT_SPEED_1G 0x00000002
-#define ETH_EXT_SPEED_10G 0x00000004
-#define ETH_EXT_SPEED_20G 0x00000008
-#define ETH_EXT_SPEED_25G 0x00000010
-#define ETH_EXT_SPEED_40G 0x00000020
-#define ETH_EXT_SPEED_50G_BASE_R 0x00000040
-#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080
-#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100
-#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200
-#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400
-#define ETH_EXT_ADV_SPEED_MASK 0xffff0000
-#define ETH_EXT_ADV_SPEED_OFFSET 16
-#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000
-#define ETH_EXT_ADV_SPEED_1G 0x00020000
-#define ETH_EXT_ADV_SPEED_10G 0x00040000
-#define ETH_EXT_ADV_SPEED_20G 0x00080000
-#define ETH_EXT_ADV_SPEED_25G 0x00100000
-#define ETH_EXT_ADV_SPEED_40G 0x00200000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000
-};
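
fec_mode mixes a forced-mode value in its low byte with extended-capability flag bits above it. A hedged one-liner showing the composition, using only the defines above:

	/* e.g. force RS FEC and advertise 25G RS-528 in the extended bits */
	u32 fec_mode = (FEC_FORCE_MODE_RS << FEC_FORCE_MODE_OFFSET) |
		       ETH_EXT_FEC_25G_RS528;
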
-
-struct port_mf_cfg {
- u32 dynamic_cfg;
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
- u32 reserved[1];
-};
-
-struct eth_stats {
- u64 r64;
- u64 r127;
- u64 r255;
- u64 r511;
- u64 r1023;
- u64 r1518;
-
- union {
- struct {
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
- } bb0;
- struct {
- u64 unused1;
- u64 r1519_to_max;
- u64 unused2;
- u64 unused3;
- u64 unused4;
- } ah0;
- } u0;
-
- u64 rfcs;
- u64 rxcf;
- u64 rxpf;
- u64 rxpp;
- u64 raln;
- u64 rfcr;
- u64 rovr;
- u64 rjbr;
- u64 rund;
- u64 rfrg;
- u64 t64;
- u64 t127;
- u64 t255;
- u64 t511;
- u64 t1023;
- u64 t1518;
-
- union {
- struct {
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
- } bb1;
- struct {
- u64 t1519_to_max;
- u64 unused6;
- u64 unused7;
- u64 unused8;
- } ah1;
- } u1;
-
- u64 txpf;
- u64 txpp;
-
- union {
- struct {
- u64 tlpiec;
- u64 tncl;
- } bb2;
- struct {
- u64 unused9;
- u64 unused10;
- } ah2;
- } u2;
-
- u64 rbyte;
- u64 rxuca;
- u64 rxmca;
- u64 rxbca;
- u64 rxpok;
- u64 tbyte;
- u64 txuca;
- u64 txmca;
- u64 txbca;
- u64 txcf;
-};
-
-struct brb_stats {
- u64 brb_truncate[8];
- u64 brb_discard[8];
-};
-
-struct port_stats {
- struct brb_stats brb;
- struct eth_stats eth;
-};
-
-struct couple_mode_teaming {
- u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM (1 << 0)
-
-#define PORT_CMT_PORT_ROLE (1 << 1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE (1 << 1)
-
-#define PORT_CMT_TEAM_MASK (1 << 2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 (1 << 2)
-};
-
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
-
-enum _lldp_agent {
- LLDP_NEAREST_BRIDGE = 0,
- LLDP_NEAREST_NON_TPMR_BRIDGE,
- LLDP_NEAREST_CUSTOMER_BRIDGE,
- LLDP_MAX_LLDP_AGENTS
-};
-
-struct lldp_config_params_s {
- u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
-};
-
-struct lldp_status_params_s {
- u32 prefix_seq_num;
- u32 status;
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- u32 suffix_seq_num;
-};
-
-struct dcbx_ets_feature {
- u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_OOO_TC_MASK 0x00000f00
-#define DCBX_OOO_TC_SHIFT 8
- u32 pri_tc_tbl[1];
-#define DCBX_TCP_OOO_TC (4)
-
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
-#define DCBX_CEE_STRICT_PRIORITY 0xf
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
-};
-
-#define DCBX_TCP_OOO_TC (4)
-#define DCBX_TCP_OOO_K2_4PORT_TC (3)
-
-struct dcbx_app_priority_entry {
- u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_SF_IEEE_MASK 0x0000f000
-#define DCBX_APP_SF_IEEE_SHIFT 12
-#define DCBX_APP_SF_IEEE_RESERVED 0
-#define DCBX_APP_SF_IEEE_ETHTYPE 1
-#define DCBX_APP_SF_IEEE_TCP_PORT 2
-#define DCBX_APP_SF_IEEE_UDP_PORT 3
-#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
-
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
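
Unlike the context-struct defines earlier in the file, the _MASK values in this block are pre-shifted (the mask already sits at its field position), so extraction is mask-then-shift. A generic extractor for this style, with an illustrative helper name:

	#define GET_MFW_FIELD_EX(val, name)	\
		(((val) & name##_MASK) >> name##_SHIFT)

	static u16 app_entry_proto_id(u32 entry)
	{
		return GET_MFW_FIELD_EX(entry, DCBX_APP_PROTOCOL_ID);
	}
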
-
-struct dcbx_app_priority_feature {
- u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
- struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
-};
-
-struct dcbx_features {
- struct dcbx_ets_feature ets;
- u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
- struct dcbx_app_priority_feature app;
-};
-
-struct dcbx_local_params {
- u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000007
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
-#define DCBX_CONFIG_VERSION_STATIC 4
-
- u32 flags;
- struct dcbx_features features;
-};
-
-struct dcbx_mib {
- u32 prefix_seq_num;
- u32 flags;
- struct dcbx_features features;
- u32 suffix_seq_num;
-};
-
-struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
- u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
-};
-
-struct dcb_dscp_map {
- u32 flags;
-#define DCB_DSCP_ENABLE_MASK 0x1
-#define DCB_DSCP_ENABLE_SHIFT 0
-#define DCB_DSCP_ENABLE 1
- u32 dscp_pri_map[8];
-};
-
-struct public_global {
- u32 max_path;
- u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
- u32 debug_mb_offset;
- u32 phymod_dbg_mb_offset;
- struct couple_mode_teaming cmt;
- s32 internal_temperature;
- u32 mfw_ver;
- u32 running_bundle_id;
- s32 external_temperature;
- u32 mdump_reason;
- u64 reserved;
- u32 data_ptr;
- u32 data_size;
-};
-
-struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- u32 accum_ack;
-};
-
-struct public_path {
- struct fw_flr_mb flr_mb;
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
-#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
-};
-
-struct public_port {
- u32 validity_map;
-
- u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-#define LINK_STATUS_PFC_ENABLED 0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
-#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-#define LINK_STATUS_SFP_TX_FAULT 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
-#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
-#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
-#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
-
-#define LINK_STATUS_FEC_MODE_MASK 0x38000000
-#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
-#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
-#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
-
- u32 link_status1;
- u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr;
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- struct port_mf_cfg port_mf_config;
- struct port_stats stats;
-
- u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_MODULE_FIBER 0x6
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
-
- u32 lfa_status;
- u32 link_change_count;
-
- struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
-
- /* DCBX related MIB */
- struct dcbx_local_params local_admin_dcbx_mib;
- struct dcbx_mib remote_dcbx_mib;
- struct dcbx_mib operational_dcbx_mib;
-
- u32 reserved[2];
-
- u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
-#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
-#define ETH_TRANSCEIVER_TYPE_NONE 0x00
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
-#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
-#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
-#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
-#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
-#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
-#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
-#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
-#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
-#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
-#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
-#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
-#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
-#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
-#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
-#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
-
- u32 wol_info;
- u32 wol_pkt_len;
- u32 wol_pkt_details;
- struct dcb_dscp_map dcb_dscp_map;
-
- u32 eee_status;
-#define EEE_ACTIVE_BIT BIT(0)
-#define EEE_LD_ADV_STATUS_MASK 0x000000f0
-#define EEE_LD_ADV_STATUS_OFFSET 4
-#define EEE_1G_ADV BIT(1)
-#define EEE_10G_ADV BIT(2)
-#define EEE_LP_ADV_STATUS_MASK 0x00000f00
-#define EEE_LP_ADV_STATUS_OFFSET 8
-#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
-#define EEE_SUPPORTED_SPEED_OFFSET 12
-#define EEE_1G_SUPPORTED BIT(1)
-#define EEE_10G_SUPPORTED BIT(2)
-
- u32 eee_remote;
-#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
-#define EEE_REMOTE_TW_TX_OFFSET 0
-#define EEE_REMOTE_TW_RX_MASK 0xffff0000
-#define EEE_REMOTE_TW_RX_OFFSET 16
-
- u32 reserved1;
- u32 oem_cfg_port;
-#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
-#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
-#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
-#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
-#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
-#define OEM_CFG_SCHED_TYPE_OFFSET 2
-#define OEM_CFG_SCHED_TYPE_ETS 0x1
-#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
-};
-
-struct public_func {
- u32 reserved0[2];
-
- u32 mtu_size;
-
- u32 reserved[7];
-
- u32 config;
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
-#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
-#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040
-
-#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
-
- u32 status;
-#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
-
- u32 mac_upper;
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
- u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
-
- u32 fcoe_wwn_port_name_upper;
- u32 fcoe_wwn_port_name_lower;
-
- u32 fcoe_wwn_node_name_upper;
- u32 fcoe_wwn_node_name_lower;
-
- u32 ovlan_stag;
-#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
-#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
-
- u32 pf_allocation;
-
- u32 preserve_data;
-
- u32 driver_last_activity_ts;
-
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 drv_id;
-#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
-
-#define LOAD_REQ_HSI_VERSION 2
-#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
- DRV_ID_MCP_HSI_VER_SHIFT)
-
-#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-
-#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
-
- u32 oem_cfg_func;
-#define OEM_CFG_FUNC_TC_MASK 0x0000000F
-#define OEM_CFG_FUNC_TC_OFFSET 0
-#define OEM_CFG_FUNC_TC_0 0x0
-#define OEM_CFG_FUNC_TC_1 0x1
-#define OEM_CFG_FUNC_TC_2 0x2
-#define OEM_CFG_FUNC_TC_3 0x3
-#define OEM_CFG_FUNC_TC_4 0x4
-#define OEM_CFG_FUNC_TC_5 0x5
-#define OEM_CFG_FUNC_TC_6 0x6
-#define OEM_CFG_FUNC_TC_7 0x7
-
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
-};
-
-struct mcp_mac {
- u32 mac_upper;
- u32 mac_lower;
-};
-
-struct mcp_val64 {
- u32 lo;
- u32 hi;
-};
-
-struct mcp_file_att {
- u32 nvm_start_addr;
- u32 len;
-};
-
-struct bist_nvm_image_att {
- u32 return_code;
- u32 image_type;
- u32 nvm_start_addr;
- u32 len;
-};
-
-#define MCP_DRV_VER_STR_SIZE 16
-#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
-#define MCP_DRV_NVM_BUF_LEN 32
-struct drv_version_stc {
- u32 version;
- u8 name[MCP_DRV_VER_STR_SIZE - 4];
-};
-
-struct lan_stats_stc {
- u64 ucast_rx_pkts;
- u64 ucast_tx_pkts;
- u32 fcs_err;
- u32 rserved;
-};
-
-struct fcoe_stats_stc {
- u64 rx_pkts;
- u64 tx_pkts;
- u32 fcs_err;
- u32 login_failure;
-};
-
-struct ocbb_data_stc {
- u32 ocbb_host_addr;
- u32 ocsd_host_addr;
- u32 ocsd_req_update_interval;
-};
-
-#define MAX_NUM_OF_SENSORS 7
-struct temperature_status_stc {
- u32 num_of_sensors;
- u32 sensor[MAX_NUM_OF_SENSORS];
-};
-
-/* crash dump configuration header */
-struct mdump_config_stc {
- u32 version;
- u32 config;
- u32 epoc;
- u32 num_of_logs;
- u32 valid_logs;
-};
-
-enum resource_id_enum {
- RESOURCE_NUM_SB_E = 0,
- RESOURCE_NUM_L2_QUEUE_E = 1,
- RESOURCE_NUM_VPORT_E = 2,
- RESOURCE_NUM_VMQ_E = 3,
- RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
- RESOURCE_FACTOR_RSS_PER_VF_E = 5,
- RESOURCE_NUM_RL_E = 6,
- RESOURCE_NUM_PQ_E = 7,
- RESOURCE_NUM_VF_E = 8,
- RESOURCE_VFC_FILTER_E = 9,
- RESOURCE_ILT_E = 10,
- RESOURCE_CQS_E = 11,
- RESOURCE_GFT_PROFILES_E = 12,
- RESOURCE_NUM_TC_E = 13,
- RESOURCE_NUM_RSS_ENGINES_E = 14,
- RESOURCE_LL2_QUEUE_E = 15,
- RESOURCE_RDMA_STATS_QUEUE_E = 16,
- RESOURCE_BDQ_E = 17,
- RESOURCE_QCN_E = 18,
- RESOURCE_LLH_FILTER_E = 19,
- RESOURCE_VF_MAC_ADDR = 20,
- RESOURCE_LL2_CQS_E = 21,
- RESOURCE_VF_CNQS = 22,
- RESOURCE_MAX_NUM,
- RESOURCE_NUM_INVALID = 0xFFFFFFFF
-};
-
-/* Resource ID is to be filled by the driver in the MB request
- * Size, offset & flags to be filled by the MFW in the MB response
- */
-struct resource_info {
- enum resource_id_enum res_id;
- u32 size; /* number of allocated resources */
- u32 offset; /* Offset of the 1st resource */
- u32 vf_size;
- u32 vf_offset;
- u32 flags;
-#define RESOURCE_ELEMENT_STRICT (1 << 0)
-};
-
-#define DRV_ROLE_NONE 0
-#define DRV_ROLE_PREBOOT 1
-#define DRV_ROLE_OS 2
-#define DRV_ROLE_KDUMP 3
-
-struct load_req_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_REQ_ROLE_MASK 0x000000FF
-#define LOAD_REQ_ROLE_SHIFT 0
-#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
-#define LOAD_REQ_LOCK_TO_SHIFT 8
-#define LOAD_REQ_LOCK_TO_DEFAULT 0
-#define LOAD_REQ_LOCK_TO_NONE 255
-#define LOAD_REQ_FORCE_MASK 0x000F0000
-#define LOAD_REQ_FORCE_SHIFT 16
-#define LOAD_REQ_FORCE_NONE 0
-#define LOAD_REQ_FORCE_PF 1
-#define LOAD_REQ_FORCE_ALL 2
-#define LOAD_REQ_FLAGS0_MASK 0x00F00000
-#define LOAD_REQ_FLAGS0_SHIFT 20
-#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
-};
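
misc0 packs the role, lock timeout, and force policy into one word. A sketch of composing it for an OS load with the default timeout, written helper-free so the layout stays visible:

	u32 misc0 = (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) |
		    (LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT) |
		    (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT);
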
-
-struct load_rsp_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_RSP_ROLE_MASK 0x000000FF
-#define LOAD_RSP_ROLE_SHIFT 0
-#define LOAD_RSP_HSI_MASK 0x0000FF00
-#define LOAD_RSP_HSI_SHIFT 8
-#define LOAD_RSP_FLAGS0_MASK 0x000F0000
-#define LOAD_RSP_FLAGS0_SHIFT 16
-#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
-};
-
-struct mdump_retain_data_stc {
- u32 valid;
- u32 epoch;
- u32 pf;
- u32 status;
-};
-
-union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
- struct mcp_mac wol_mac;
-
- struct eth_phy_cfg drv_phy_cfg;
-
- struct mcp_val64 val64;
-
- u8 raw_data[MCP_DRV_NVM_BUF_LEN];
-
- struct mcp_file_att file_att;
-
- u32 ack_vf_disabled[VF_MAX_STATIC / 32];
-
- struct drv_version_stc drv_version;
-
- struct lan_stats_stc lan_stats;
- struct fcoe_stats_stc fcoe_stats;
- struct ocbb_data_stc ocbb_info;
- struct temperature_status_stc temp_info;
- struct resource_info resource;
- struct bist_nvm_image_att nvm_image_att;
- struct mdump_config_stc mdump_config;
-};
-
-struct public_drv_mb {
- u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_INIT_HW 0x12000000
-#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
-#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_INIT_PHY 0x22000000
-#define DRV_MSG_CODE_LINK_RESET 0x23000000
-#define DRV_MSG_CODE_SET_DCBX 0x25000000
-#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
-#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
-#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
-#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
-#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
-#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
-#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
-#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
-
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
-#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000
-#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000
-#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
-#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
-#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
-#define DRV_MSG_CODE_MCP_RESET 0x00090000
-#define DRV_MSG_CODE_SET_VERSION 0x000f0000
-#define DRV_MSG_CODE_MCP_HALT 0x00100000
-#define DRV_MSG_CODE_SET_VMAC 0x00110000
-#define DRV_MSG_CODE_GET_VMAC 0x00120000
-#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
-#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
-#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
-#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
-#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
-
-#define DRV_MSG_CODE_GET_STATS 0x00130000
-#define DRV_MSG_CODE_STATS_TYPE_LAN 1
-#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
-#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
-#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
-
-#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
-
-#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
-
-#define DRV_MSG_CODE_BIST_TEST 0x001e0000
-#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
-/* Send crash dump commands with param[3:0] - opcode */
-#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
-#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
-#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
-#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
-
-#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
-
-#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
-#define RESOURCE_CMD_REQ_RESC_SHIFT 0
-#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
-#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
-#define RESOURCE_OPCODE_REQ 1
-#define RESOURCE_OPCODE_REQ_WO_AGING 2
-#define RESOURCE_OPCODE_REQ_W_AGING 3
-#define RESOURCE_OPCODE_RELEASE 4
-#define RESOURCE_OPCODE_FORCE_RELEASE 5
-#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
-#define RESOURCE_CMD_REQ_AGE_SHIFT 8
-
-#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
-#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
-#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
-#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
-#define RESOURCE_OPCODE_GNT 1
-#define RESOURCE_OPCODE_BUSY 2
-#define RESOURCE_OPCODE_RELEASED 3
-#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
-#define RESOURCE_OPCODE_WRONG_OWNER 5
-#define RESOURCE_OPCODE_UNKNOWN_CMD 255
-
-#define RESOURCE_DUMP 0
-
-/* DRV_MSG_CODE_MDUMP_CMD parameters */
-#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
-#define DRV_MSG_CODE_MDUMP_ACK 0x01
-#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
-#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
-#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
-#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
-#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
-#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
-#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
-
-#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
-#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
-#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
-
-#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
-#define DRV_MSG_CODE_OS_WOL 0x002e0000
-
-#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
-#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 drv_mb_param;
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
-#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
-#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
-#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
-
-#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
-#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
-#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
-#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
-#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
-#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
-
-#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
-#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
-
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
-
-#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
-#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
-
-#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
- DRV_MB_PARAM_WOL_DISABLED | \
- DRV_MB_PARAM_WOL_ENABLED)
-#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
-#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
-#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
-
-#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
- DRV_MB_PARAM_ESWITCH_MODE_VEB | \
- DRV_MB_PARAM_ESWITCH_MODE_VEPA)
-#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
-#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
-#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
-
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
-
- /* Resource Allocation params - Driver version support */
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
-#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
-#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
-#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
-#define DRV_MB_PARAM_BIST_RC_PASSED 1
-#define DRV_MB_PARAM_BIST_RC_FAILED 2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
-#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
-
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
-#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
-
-/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
-
-/* Driver attributes params */
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
-
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
-
- u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_UNSUPPORTED 0x00000000
-#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
-#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000
-#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
-
-#define FW_MSG_CODE_NVM_OK 0x00010000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
-#define FW_MSG_CODE_PHY_OK 0x00110000
-#define FW_MSG_CODE_OK 0x00160000
-#define FW_MSG_CODE_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-
-#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
-#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
-#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
-#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
-#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
-
-#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
-
- u32 fw_mb_param;
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
- /* Get PF RDMA protocol command response */
-#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
-#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
-#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
-#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
-
- /* Get MFW feature support response */
-#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
-#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
-#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
-
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
-
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
-
-#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
-#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
-
- u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
-
- u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
-
- union drv_union_data union_data;
-};
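
The drv_mb_header/fw_mb_header pair above follows a command-plus-sequence convention: the command code sits in the top 16 bits and a rolling sequence number in the bottom 16, and the firmware echoes the sequence when it responds. A minimal sketch of the header packing (polling and register I/O elided; an illustration, not the driver's qed_mcp path):

	static u32 build_drv_mb_header(u32 cmd, u16 *seq)
	{
		*seq = (*seq + 1) & DRV_MSG_SEQ_NUMBER_MASK;
		return (cmd & DRV_MSG_CODE_MASK) | *seq;
	}

	/* A response is ours when the firmware echoes the sequence:
	 *	done = (fw_mb_header & FW_MSG_SEQ_NUMBER_MASK) == *seq;
	 *	code = fw_mb_header & FW_MSG_CODE_MASK;
	 */
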
-
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
-
-enum MFW_DRV_MSG_TYPE {
- MFW_DRV_MSG_LINK_CHANGE,
- MFW_DRV_MSG_FLR_FW_ACK_FAILED,
- MFW_DRV_MSG_VF_DISABLED,
- MFW_DRV_MSG_LLDP_DATA_UPDATED,
- MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
- MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_ERROR_RECOVERY,
- MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_S_TAG_UPDATE,
- MFW_DRV_MSG_GET_LAN_STATS,
- MFW_DRV_MSG_GET_FCOE_STATS,
- MFW_DRV_MSG_GET_ISCSI_STATS,
- MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_FAILURE_DETECTED,
- MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
- MFW_DRV_MSG_RESERVED,
- MFW_DRV_MSG_GET_TLV_REQ,
- MFW_DRV_MSG_OEM_CFG_UPDATE,
- MFW_DRV_MSG_MAX
-};
-
-#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
-#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
-#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
-#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
-
-struct public_mfw_mb {
- u32 sup_msgs;
- u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
- u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
-};
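
msg[] and ack[] pack one byte per message id, per the DWORD/OFFSET/MASK math just above. A hedged sketch of the pending test (the real handler lives in qed_mcp; mb here is a snapshot, not a live register read):

	static bool mfw_msg_pending(const struct public_mfw_mb *mb, u32 id)
	{
		u32 msg = mb->msg[MFW_DRV_MSG_DWORD(id)];
		u32 ack = mb->ack[MFW_DRV_MSG_DWORD(id)];

		/* a message is outstanding while its byte differs from the ack */
		return ((msg ^ ack) & MFW_DRV_MSG_MASK(id)) != 0;
	}
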
-
-enum public_sections {
- PUBLIC_DRV_MB,
- PUBLIC_MFW_MB,
- PUBLIC_GLOBAL,
- PUBLIC_PATH,
- PUBLIC_PORT,
- PUBLIC_FUNC,
- PUBLIC_MAX_SECTIONS
-};
-
-struct mcp_public_data {
- u32 num_sections;
- u32 sections[PUBLIC_MAX_SECTIONS];
- struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
- struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
- struct public_global global;
- struct public_path path[MCP_GLOB_PATH_MAX];
- struct public_port port[MCP_GLOB_PORT_MAX];
- struct public_func func[MCP_GLOB_FUNC_MAX];
-};
-
-#define MAX_I2C_TRANSACTION_SIZE 16
-
-/* OCBB definitions */
-enum tlvs {
- /* Category 1: Device Properties */
- DRV_TLV_CLP_STR,
- DRV_TLV_CLP_STR_CTD,
- /* Category 6: Device Configuration */
- DRV_TLV_SCSI_TO,
- DRV_TLV_R_T_TOV,
- DRV_TLV_R_A_TOV,
- DRV_TLV_E_D_TOV,
- DRV_TLV_CR_TOV,
- DRV_TLV_BOOT_TYPE,
- /* Category 8: Port Configuration */
- DRV_TLV_NPIV_ENABLED,
- /* Category 10: Function Configuration */
- DRV_TLV_FEATURE_FLAGS,
- DRV_TLV_LOCAL_ADMIN_ADDR,
- DRV_TLV_ADDITIONAL_MAC_ADDR_1,
- DRV_TLV_ADDITIONAL_MAC_ADDR_2,
- DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
- DRV_TLV_LSO_MIN_SEGMENT_COUNT,
- DRV_TLV_PROMISCUOUS_MODE,
- DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
- DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
- DRV_TLV_OS_DRIVER_STATES,
- DRV_TLV_PXE_BOOT_PROGRESS,
- /* Category 12: FC/FCoE Configuration */
- DRV_TLV_NPIV_STATE,
- DRV_TLV_NUM_OF_NPIV_IDS,
- DRV_TLV_SWITCH_NAME,
- DRV_TLV_SWITCH_PORT_NUM,
- DRV_TLV_SWITCH_PORT_ID,
- DRV_TLV_VENDOR_NAME,
- DRV_TLV_SWITCH_MODEL,
- DRV_TLV_SWITCH_FW_VER,
- DRV_TLV_QOS_PRIORITY_PER_802_1P,
- DRV_TLV_PORT_ALIAS,
- DRV_TLV_PORT_STATE,
- DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_LINK_FAILURE_COUNT,
- DRV_TLV_FCOE_BOOT_PROGRESS,
- /* Category 13: iSCSI Configuration */
- DRV_TLV_TARGET_LLMNR_ENABLED,
- DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
- DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
- DRV_TLV_AUTHENTICATION_METHOD,
- DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
- DRV_TLV_MAX_FRAME_SIZE,
- DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_ISCSI_BOOT_PROGRESS,
- /* Category 20: Device Data */
- DRV_TLV_PCIE_BUS_RX_UTILIZATION,
- DRV_TLV_PCIE_BUS_TX_UTILIZATION,
- DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
- DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
- DRV_TLV_NCSI_RX_BYTES_RECEIVED,
- DRV_TLV_NCSI_TX_BYTES_SENT,
- /* Category 22: Base Port Data */
- DRV_TLV_RX_DISCARDS,
- DRV_TLV_RX_ERRORS,
- DRV_TLV_TX_ERRORS,
- DRV_TLV_TX_DISCARDS,
- DRV_TLV_RX_FRAMES_RECEIVED,
- DRV_TLV_TX_FRAMES_SENT,
- /* Category 23: FC/FCoE Port Data */
- DRV_TLV_RX_BROADCAST_PACKETS,
- DRV_TLV_TX_BROADCAST_PACKETS,
- /* Category 28: Base Function Data */
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
- DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_PF_RX_FRAMES_RECEIVED,
- DRV_TLV_RX_BYTES_RECEIVED,
- DRV_TLV_PF_TX_FRAMES_SENT,
- DRV_TLV_TX_BYTES_SENT,
- DRV_TLV_IOV_OFFLOAD,
- DRV_TLV_PCI_ERRORS_CAP_ID,
- DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
- DRV_TLV_UNCORRECTABLE_ERROR_MASK,
- DRV_TLV_CORRECTABLE_ERROR_STATUS,
- DRV_TLV_CORRECTABLE_ERROR_MASK,
- DRV_TLV_PCI_ERRORS_AECC_REGISTER,
- DRV_TLV_TX_QUEUES_EMPTY,
- DRV_TLV_RX_QUEUES_EMPTY,
- DRV_TLV_TX_QUEUES_FULL,
- DRV_TLV_RX_QUEUES_FULL,
- /* Category 29: FC/FCoE Function Data */
- DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
- DRV_TLV_FCOE_RX_BYTES_RECEIVED,
- DRV_TLV_FCOE_TX_FRAMES_SENT,
- DRV_TLV_FCOE_TX_BYTES_SENT,
- DRV_TLV_CRC_ERROR_COUNT,
- DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_1_TIMESTAMP,
- DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_2_TIMESTAMP,
- DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_3_TIMESTAMP,
- DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_4_TIMESTAMP,
- DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_5_TIMESTAMP,
- DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
- DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
- DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
- DRV_TLV_DISPARITY_ERROR_COUNT,
- DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_RJT,
- DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
- DRV_TLV_FDISCS_SENT_COUNT,
- DRV_TLV_FDISC_ACCS_RECEIVED,
- DRV_TLV_FDISC_RJTS_RECEIVED,
- DRV_TLV_PLOGI_SENT_COUNT,
- DRV_TLV_PLOGI_ACCS_RECEIVED,
- DRV_TLV_PLOGI_RJTS_RECEIVED,
- DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_1_TIMESTAMP,
- DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_2_TIMESTAMP,
- DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_3_TIMESTAMP,
- DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_4_TIMESTAMP,
- DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_5_TIMESTAMP,
- DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
- DRV_TLV_LOGOS_ISSUED,
- DRV_TLV_LOGO_ACCS_RECEIVED,
- DRV_TLV_LOGO_RJTS_RECEIVED,
- DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_1_TIMESTAMP,
- DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_2_TIMESTAMP,
- DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_3_TIMESTAMP,
- DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_4_TIMESTAMP,
- DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_5_TIMESTAMP,
- DRV_TLV_LOGOS_RECEIVED,
- DRV_TLV_ACCS_ISSUED,
- DRV_TLV_PRLIS_ISSUED,
- DRV_TLV_ACCS_RECEIVED,
- DRV_TLV_ABTS_SENT_COUNT,
- DRV_TLV_ABTS_ACCS_RECEIVED,
- DRV_TLV_ABTS_RJTS_RECEIVED,
- DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_1_TIMESTAMP,
- DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_2_TIMESTAMP,
- DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_3_TIMESTAMP,
- DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_4_TIMESTAMP,
- DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_5_TIMESTAMP,
- DRV_TLV_RSCNS_RECEIVED,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
- DRV_TLV_LUN_RESETS_ISSUED,
- DRV_TLV_ABORT_TASK_SETS_ISSUED,
- DRV_TLV_TPRLOS_SENT,
- DRV_TLV_NOS_SENT_COUNT,
- DRV_TLV_NOS_RECEIVED_COUNT,
- DRV_TLV_OLS_COUNT,
- DRV_TLV_LR_COUNT,
- DRV_TLV_LRR_COUNT,
- DRV_TLV_LIP_SENT_COUNT,
- DRV_TLV_LIP_RECEIVED_COUNT,
- DRV_TLV_EOFA_COUNT,
- DRV_TLV_EOFNI_COUNT,
- DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
- DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_BUSY_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
- DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
- DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
- /* Category 30: iSCSI Function Data */
- DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
- DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
- DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
- DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
-};
-
-struct nvm_cfg_mac_address {
- u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
-
- u32 mac_addr_lo;
-};
-
-struct nvm_cfg1_glob {
- u32 generic_cont0;
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-
- u32 engineering_change[3];
- u32 manufacturing_id;
- u32 serial_number[4];
- u32 pcie_cfg;
- u32 mgmt_traffic;
-
- u32 core_cfg;
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
-
- u32 e_lane_cfg1;
- u32 e_lane_cfg2;
- u32 f_lane_cfg1;
- u32 f_lane_cfg2;
- u32 mps10_preemphasis;
- u32 mps10_driver_current;
- u32 mps25_preemphasis;
- u32 mps25_driver_current;
- u32 pci_id;
- u32 pci_subsys_id;
- u32 bar;
- u32 mps10_txfir_main;
- u32 mps10_txfir_post;
- u32 mps25_txfir_main;
- u32 mps25_txfir_post;
- u32 manufacture_ver;
- u32 manufacture_time;
- u32 led_global_settings;
- u32 generic_cont1;
-
- u32 mbi_version;
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
-
- u32 mbi_date;
- u32 misc_sig;
-
- u32 device_capabilities;
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
-
- u32 power_dissipated;
- u32 power_consumed;
- u32 efi_version;
- u32 multi_net_modes_cap;
- u32 reserved[41];
-};
-
-struct nvm_cfg1_path {
- u32 reserved[30];
-};
-
-struct nvm_cfg1_port {
- u32 rel_to_opt123;
- u32 rel_to_opt124;
-
- u32 generic_cont0;
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
-
- u32 pcie_cfg;
- u32 features;
-
- u32 speed_cap_mask;
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
-
- u32 link_settings;
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
-
- u32 phy_cfg;
- u32 mgmt_traffic;
-
- u32 ext_phy;
- /* EEE power saving mode */
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
-
- u32 mba_cfg1;
- u32 mba_cfg2;
- u32 vf_cfg;
- struct nvm_cfg_mac_address lldp_mac_address;
- u32 led_port_settings;
- u32 transceiver_00;
- u32 device_ids;
-
- u32 board_cfg;
-#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
-#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
-#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
-#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
-
- u32 mnm_10g_cap;
- u32 mnm_10g_ctrl;
- u32 mnm_10g_misc;
- u32 mnm_25g_cap;
- u32 mnm_25g_ctrl;
- u32 mnm_25g_misc;
- u32 mnm_40g_cap;
- u32 mnm_40g_ctrl;
- u32 mnm_40g_misc;
- u32 mnm_50g_cap;
- u32 mnm_50g_ctrl;
- u32 mnm_50g_misc;
- u32 mnm_100g_cap;
- u32 mnm_100g_ctrl;
- u32 mnm_100g_misc;
-
- u32 temperature;
- u32 ext_phy_cfg1;
-
- u32 extended_speed;
-#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
-#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
-
- u32 extended_fec_mode;
-
- u32 reserved[112];
-};
-
-struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address;
- u32 rsrv1;
- u32 rsrv2;
- u32 device_id;
- u32 cmn_cfg;
- u32 pci_cfg;
- struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
- struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
- u32 preboot_generic_cfg;
- u32 reserved[8];
-};
-
-struct nvm_cfg1 {
- struct nvm_cfg1_glob glob;
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
-};
-
-enum spad_sections {
- SPAD_SECTION_TRACE,
- SPAD_SECTION_NVM_CFG,
- SPAD_SECTION_PUBLIC,
- SPAD_SECTION_PRIVATE,
- SPAD_SECTION_MAX
-};
-
-#define MCP_TRACE_SIZE 2048 /* 2kb */
-
-/* This section is located at a fixed location in the beginning of the
- * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
- * All the rest of data has a floating location which differs from version to
- * version, and is pointed by the mcp_meta_data below.
- * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
- * with it from nvram in order to clear this portion.
- */
-struct static_init {
- u32 num_sections;
- offsize_t sections[SPAD_SECTION_MAX];
-#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
-
- struct mcp_trace trace;
-#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
- u8 trace_buffer[MCP_TRACE_SIZE];
-#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
- /* running_mfw has the same definition as in nvm_map.h.
- * This bit indicate both the running dir, and the running bundle.
- * It is set once when the LIM is loaded.
- */
- u32 running_mfw;
-#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
- u32 build_time;
-#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
- u32 reset_type;
-#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
- u32 mfw_secure_mode;
-#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
- u16 pme_status_pf_bitmap;
-#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
- u16 pme_enable_pf_bitmap;
-#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
- u32 mim_nvm_addr;
- u32 mim_start_addr;
- u32 ah_pcie_link_params;
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
-#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
-
- u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
-};
-
-#define NVM_MAGIC_VALUE 0x669955aa
-
-enum nvm_image_type {
- NVM_TYPE_TIM1 = 0x01,
- NVM_TYPE_TIM2 = 0x02,
- NVM_TYPE_MIM1 = 0x03,
- NVM_TYPE_MIM2 = 0x04,
- NVM_TYPE_MBA = 0x05,
- NVM_TYPE_MODULES_PN = 0x06,
- NVM_TYPE_VPD = 0x07,
- NVM_TYPE_MFW_TRACE1 = 0x08,
- NVM_TYPE_MFW_TRACE2 = 0x09,
- NVM_TYPE_NVM_CFG1 = 0x0a,
- NVM_TYPE_L2B = 0x0b,
- NVM_TYPE_DIR1 = 0x0c,
- NVM_TYPE_EAGLE_FW1 = 0x0d,
- NVM_TYPE_FALCON_FW1 = 0x0e,
- NVM_TYPE_PCIE_FW1 = 0x0f,
- NVM_TYPE_HW_SET = 0x10,
- NVM_TYPE_LIM = 0x11,
- NVM_TYPE_AVS_FW1 = 0x12,
- NVM_TYPE_DIR2 = 0x13,
- NVM_TYPE_CCM = 0x14,
- NVM_TYPE_EAGLE_FW2 = 0x15,
- NVM_TYPE_FALCON_FW2 = 0x16,
- NVM_TYPE_PCIE_FW2 = 0x17,
- NVM_TYPE_AVS_FW2 = 0x18,
- NVM_TYPE_INIT_HW = 0x19,
- NVM_TYPE_DEFAULT_CFG = 0x1a,
- NVM_TYPE_MDUMP = 0x1b,
- NVM_TYPE_META = 0x1c,
- NVM_TYPE_ISCSI_CFG = 0x1d,
- NVM_TYPE_FCOE_CFG = 0x1f,
- NVM_TYPE_ETH_PHY_FW1 = 0x20,
- NVM_TYPE_ETH_PHY_FW2 = 0x21,
- NVM_TYPE_BDN = 0x22,
- NVM_TYPE_8485X_PHY_FW = 0x23,
- NVM_TYPE_PUB_KEY = 0x24,
- NVM_TYPE_RECOVERY = 0x25,
- NVM_TYPE_PLDM = 0x26,
- NVM_TYPE_UPK1 = 0x27,
- NVM_TYPE_UPK2 = 0x28,
- NVM_TYPE_MASTER_KC = 0x29,
- NVM_TYPE_BACKUP_KC = 0x2a,
- NVM_TYPE_HW_DUMP = 0x2b,
- NVM_TYPE_HW_DUMP_OUT = 0x2c,
- NVM_TYPE_BIN_NVM_META = 0x30,
- NVM_TYPE_ROM_TEST = 0xf0,
- NVM_TYPE_88X33X0_PHY_FW = 0x31,
- NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
- NVM_TYPE_MAX,
-};
-
-#define DIR_ID_1 (0)
-
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 2734f49956f7..e535983ce21b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask {
#define DMAE_MAX_CLIENTS 32
/**
- * @brief qed_gtt_init - Initialize GTT windows
+ * qed_gtt_init(): Initialize GTT windows.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ * qed_ptt_invalidate(): Forces all ptt entries to be re-configured.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ * qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return struct _qed_status - success (0), negative - error.
+ * Return: Int - 0 on success, negative on error.
*/
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_free -
+ * qed_ptt_pool_free(): Free PTT pool.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
*
- * @return u32
+ * Return: u32 - the PTT's GRC/HW address.
*/
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address
+ * qed_ptt_get_bar_addr(): Get PTT's external BAR address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_ptt: PTT window.
*
- * @return u32
+ * Return: u32 - the PTT's external BAR address.
*/
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ * qed_ptt_set_win(): Set PTT Window's GRC BAR address.
*
- * @param p_hwfn
- * @param new_hw_addr
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @new_hw_addr: New HW address.
+ * @p_ptt: PTT window.
+ *
+ * Return: Void.
*/
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 new_hw_addr);
/**
- * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ * qed_get_reserved_ptt(): Get a specific reserved PTT.
*
- * @param p_hwfn
- * @param ptt_idx
+ * @p_hwfn: HW device data.
+ * @ptt_idx: Index of the reserved PTT.
*
- * @return struct qed_ptt *
+ * Return: struct qed_ptt *.
*/
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
enum reserved_ptts ptt_idx);
/**
- * @brief qed_wr - Write value to BAR using the given ptt
+ * qed_wr(): Write value to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @val: Value to write.
+ * @hw_addr: HW address to write to.
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: Void.
*/
void qed_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn,
u32 val);
/**
- * @brief qed_rd - Read value from BAR using the given ptt
+ * qed_rd(): Read value from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @hw_addr: HW address to read from.
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: u32 - the value read from the BAR.
*/
u32 qed_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr);
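Taken together, qed_rd() and qed_wr() support the usual read-modify-write
pattern on device registers. A minimal sketch, not part of this patch - the
address, mask and shift arguments are hypothetical stand-ins:

	/* Update one field of a device register through the PTT window. */
	static void example_update_field(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 hw_addr, u32 mask, u32 shift,
					 u32 val)
	{
		u32 reg = qed_rd(p_hwfn, p_ptt, hw_addr);	/* read current value */

		reg = (reg & ~mask) | ((val << shift) & mask);	/* replace the field */
		qed_wr(p_hwfn, p_ptt, hw_addr, reg);		/* write it back */
	}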
/**
- * @brief qed_memcpy_from - copy n bytes from BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param dest
- * @param hw_addr
- * @param n
+ * qed_memcpy_from(): Copy n bytes from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @dest: Destination buffer.
+ * @hw_addr: HW address to copy from.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
*/
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
size_t n);
/**
- * @brief qed_memcpy_to - copy n bytes to BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param hw_addr
- * @param src
- * @param n
+ * qed_memcpy_to(): Copy n bytes to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @hw_addr: HW address to copy to.
+ * @src: Source buffer.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
*/
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
void *src,
size_t n);
/**
- * @brief qed_fid_pretend - pretend to another function when
- * accessing the ptt window. There is no way to unpretend
- * a function. The only way to cancel a pretend is to
- * pretend back to the original function.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param fid - fid field of pxp_pretend structure. Can contain
- * either pf / vf, port/path fields are don't care.
+ * qed_fid_pretend(): Pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @fid: fid field of pxp_pretend structure. Can contain
+ * either pf / vf; port/path fields are don't care.
+ *
+ * Return: Void.
*/
void qed_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 fid);
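A minimal sketch of the pretend discipline the comment above describes: since
there is no unpretend, the caller pretends back to its own fid when done. The
fid values and the helper name here are illustrative only:

	static void example_access_as_vf(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u16 vf_fid, u16 own_fid)
	{
		qed_fid_pretend(p_hwfn, p_ptt, vf_fid);	/* window now acts as the VF */
		/* ... GRC accesses issued on behalf of the VF ... */
		qed_fid_pretend(p_hwfn, p_ptt, own_fid);	/* "cancel" by pretending back */
	}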
/**
- * @brief qed_port_pretend - pretend to another port when
- * accessing the ptt window
+ * qed_port_pretend(): Pretend to another port when accessing the ptt window.
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @port_id: The port to pretend to.
+ *
+ * Return: Void.
*/
void qed_port_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 port_id);
/**
- * @brief qed_port_unpretend - cancel any previously set port
- * pretend
+ * qed_port_unpretend(): Cancel any previously set port pretend.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_port_fid_pretend - pretend to another port and another function
- * when accessing the ptt window
+ * qed_port_fid_pretend(): Pretend to another port and another function
+ * when accessing the ptt window.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @port_id: The port to pretend to.
+ * @fid: fid field of pxp_pretend structure. Can contain either pf / vf.
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
- * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
+ * Return: Void.
*/
void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 port_id, u16 fid);
/**
- * @brief qed_vfid_to_concrete - build a concrete FID for a
- * given VF ID
+ * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfid
+ * @p_hwfn: HW device data.
+ * @vfid: VFID.
+ *
+ * Return: u32 - the concrete FID.
*/
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
/**
- * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
- * this is declared here since other files will require it.
- * @param idx
+ * qed_dmae_idx_to_go_cmd(): Map the idx to a dmae cmd;
+ * declared here since other files will require it.
+ *
+ * @idx: DMAE command index.
+ *
+ * Return: u32 - the mapped DMAE command.
*/
u32 qed_dmae_idx_to_go_cmd(u8 idx);
/**
- * @brief qed_dmae_info_alloc - Init the dmae_info structure
- * which is part of p_hwfn.
- * @param p_hwfn
+ * qed_dmae_info_alloc(): Init the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int - 0 on success, negative on error.
*/
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_dmae_info_free - Free the dmae_info structure
- * which is part of p_hwfn
+ * qed_dmae_info_free(): Free the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
@@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
#define QED_HW_ERR_MAX_STR_SIZE 256
/**
- * @brief qed_hw_err_notify - Notify upper layer driver and management FW
- * about a HW error.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param err_type
- * @param fmt - debug data buffer to send to the MFW
- * @param ... - buffer format args
+ * qed_hw_err_notify(): Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window.
+ * @err_type: Error type.
+ * @fmt: Format string for the debug data buffer sent to the MFW.
+ * @...: Buffer format args.
+ *
+ * Return: Void.
*/
void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index ea888a2c6ddb..321c43408153 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/types.h>
@@ -13,17 +13,18 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
-#define CDU_VALIDATION_DEFAULT_CFG 61
+#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
{400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
{528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
{608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
{240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
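A hedged note on how these tables are indexed (the patch itself does not spell
it out): the outer index selects the region named in the trailing comment and
the inner index the connection type, e.g.:

	u16 region3_off = con_region_offsets[0][conn_type];	/* region 3 */
	u16 task_region1_off = task_region_offsets[0][conn_type];	/* region 1 */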
@@ -42,25 +43,49 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
+/* Initial VOQ byte credit */
+#define QM_INITIAL_VOQ_BYTE_CRD 98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
+/* VOQ constants */
+#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
+#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1)
+
/* WFQ constants */
-/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
-#define QM_WFQ_UPPER_BOUND 62500000
+/* PF WFQ increment value, 0x9000 = 4*9*1024 */
+#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_WFQ_UPPER_BOUND 62500000
+
+/* PF WFQ max increment value, 0.7 * upper bound */
+#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10)
+
+/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
+#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16
+
+/* VP WFQ increment value */
+#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL)
-/* Bit of VOQ in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+/* VP WFQ min increment value */
+#define QM_VP_WFQ_MIN_INC_VAL 10800
-/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+/* VP WFQ max increment value, 2^30 */
+#define QM_VP_WFQ_MAX_INC_VAL 0x40000000
-/* 0x9000 = 4*9*1024 */
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+/* VP WFQ bypass threshold */
+#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100)
-/* Max WFQ increment value is 0.7 * upper bound */
-#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
+/* VP RL credit task cost */
+#define QM_VP_RL_CRD_TASK_COST 9700
+
+/* Bit of VOQ in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_PF_SHIFT 5
/* RL constants */
@@ -71,12 +96,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps */
-#define QM_RL_INC_VAL(rate) ({ \
- typeof(rate) __rate = (rate); \
- max_t(u32, \
- (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
- (8 * 100)), \
- 1); })
+#define QM_RL_INC_VAL(rate) ({ \
+ typeof(rate) __rate = (rate); \
+ max_t(u32, \
+ (u32)(((__rate ? __rate : \
+ 100000) * \
+ QM_RL_PERIOD * \
+ 101) / (8 * 100)), 1); })
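A worked reading of QM_RL_INC_VAL(), assuming QM_RL_PERIOD is the rate-limiter
period in microseconds (the surrounding constants suggest this, but the hunk
does not show it): a rate in Mbps equals bits per microsecond, so
rate * QM_RL_PERIOD / 8 is the byte credit per period, the 101/100 factor pads
it by 1%, and the result is clamped to at least 1:

	/* e.g. rate = 25000 Mbps (25G) */
	u32 inc = max_t(u32, (25000U * QM_RL_PERIOD * 101) / (8 * 100), 1);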
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000
@@ -84,16 +110,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
-/* Vport RL Upper bound, link speed is in Mpbs */
-#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
- QM_RL_INC_VAL(speed), \
- 9700 + 1000))
-
-/* Max Vport RL increment value is the Vport RL upper bound */
-#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
-
-/* Vport RL credit threshold in case of QM bypass */
-#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
+/* QCN RL Upper bound, speed is in Mbps */
+#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \
+ u32, \
+ (u32)(((speed) * \
+ QM_RL_PERIOD * 101) / (8 * 100)), \
+ QM_VP_RL_CRD_TASK_COST \
+ + 1000))
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
@@ -156,20 +179,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
rl_id, ext_voq, wrr) \
do { \
u32 __reg = 0; \
\
BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
- \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \
+ memset(&(map), 0, sizeof(map)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
!!(rl_valid)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
(wrr)); \
\
STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
@@ -184,8 +207,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(((rl) >> 8) << 9))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
- XSTORM_PQ_INFO_OFFSET(pq_id)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id))
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -204,7 +227,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
/* Enable RLs for all VOQs */
@@ -236,7 +259,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_PF_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
@@ -257,7 +280,7 @@ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_VP_RL_BYPASS_THRESH_SPEED);
+ QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
}
}
@@ -271,7 +294,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_VP_WFQ_BYPASS_THRESH);
}
/* Prepare runtime init values to allocate PBF command queue lines for
@@ -291,14 +314,14 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
}
/* Prepare runtime init values to allocate PBF command queue lines. */
-static void qed_cmdq_lines_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u8 tc, ext_voq, port_id, num_tcs_in_port;
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
/* Clear PBF lines of all VOQs */
for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
@@ -364,11 +387,11 @@ static void qed_cmdq_lines_rt_init(
* - No optimization for lossy TC (all are considered lossless). Shared space
* is not enabled and allocated for each TC.
*/
-static void qed_btb_blocks_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
u8 tc, ext_voq, port_id, num_tcs_in_port;
@@ -428,7 +451,7 @@ static void qed_btb_blocks_rt_init(
*/
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
- u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
(u32)QM_RL_CRD_REG_SIGN_BIT;
u32 inc_val;
u16 rl_id;
@@ -450,11 +473,73 @@ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
return 0;
}
+/* Returns the upper bound for the specified Vport RL parameters.
+ * link_speed is in Mbps.
+ * Returns 0 in case of error.
+ */
+static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
+ u32 link_speed)
+{
+ switch (vport_rl_type) {
+ case QM_RL_TYPE_NORMAL:
+ return QM_INITIAL_VOQ_BYTE_CRD;
+ case QM_RL_TYPE_QCN:
+ return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
+ default:
+ return 0;
+ }
+}
+
+/* Prepare VPORT RL runtime init values.
+ * Return -1 on error.
+ */
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u16 start_rl,
+ u16 num_rls,
+ u32 link_speed,
+ struct init_qm_rl_params *rl_params)
+{
+ u16 i, rl_id;
+
+ if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
+ return -1;
+ }
+
+ /* Go over all PF VPORTs */
+ for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
+ u32 upper_bound, inc_val;
+
+ upper_bound =
+ qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
+ rl_params[i].vport_rl_type,
+ link_speed);
+
+ inc_val =
+ QM_RL_INC_VAL(rl_params[i].vport_rl ?
+ rl_params[i].vport_rl : link_speed);
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn,
+ "Invalid RL rate - limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params,
- u32 base_mem_addr_4kb)
+static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
{
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
struct init_qm_vport_params *vport_params = p_params->vport_params;
@@ -487,7 +572,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u16 *p_first_tx_pq_id, vport_id_in_pf;
- struct qm_rf_pq_map_e4 tx_pq_map;
+ struct qm_rf_pq_map tx_pq_map;
u8 tc_id = pq_params[i].tc_id;
bool is_vf_pq;
u8 ext_voq;
@@ -504,8 +589,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
&vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
u32 map_val =
- (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
+ (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);
/* Create new VP PQ */
*p_first_tx_pq_id = pq_id;
@@ -520,7 +605,6 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
- E4,
pq_id,
*p_first_tx_pq_id,
pq_params[i].rl_valid,
@@ -570,6 +654,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
tx_pq_vf_mask[i]);
+
+ return 0;
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -620,7 +706,6 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
* Return -1 on error.
*/
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
-
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
@@ -629,8 +714,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 ext_voq;
u16 i;
- inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -652,7 +737,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
@@ -689,34 +774,38 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id, i;
+ u16 vport_pq_id, wfq, i;
u32 inc_val;
u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].wfq)
- continue;
-
- inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT WFQ weight configuration\n");
- return -1;
- }
-
/* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ /* Check if VPORT/TC is valid */
vport_pq_id = vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vport_pq_id, inc_val);
+ if (vport_pq_id == QM_INVALID_PQ_ID)
+ continue;
+
+ /* Find WFQ weight (per VPORT or per VPORT+TC) */
+ wfq = vport_params[i].wfq;
+ wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
}
+
+ /* Config registers */
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
+ vport_pq_id,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
@@ -780,11 +869,14 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
QM_OPPOR_LINE_VOQ_DEF);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
+ p_params->pf_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
+ p_params->vport_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
+ p_params->pf_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
- p_params->global_rl_en);
+ p_params->global_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
SET_FIELD(mask,
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
@@ -830,7 +922,6 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u16 i;
u8 tc;
-
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
@@ -843,7 +934,8 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
p_params->num_tids, 0);
/* Map Tx PQs */
- qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+ if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
+ return -1;
/* Init PF WFQ */
if (p_params->pf_wfq)
@@ -858,15 +950,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
+ /* Set VPORT RL */
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
+ p_params->num_rls, p_params->link_speed,
+ p_params->rl_params))
+ return -1;
+
return 0;
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -897,41 +995,66 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
+ int result = 0;
u16 vport_pq_id;
- u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
+ vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
+ vport_pq_id, wfq);
+ }
+
+ return result;
+}
+
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 wfq)
+{
+ u32 inc_val;
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID)
+ return -1;
+
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
- /* A VPORT can have several VPORT PQ IDs for various TCs */
- for (tc = 0; tc < NUM_OF_TCS; tc++) {
- vport_pq_id = first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID)
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
- }
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
+ inc_val);
return 0;
}
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type)
{
- u32 inc_val;
+ u32 inc_val, upper_bound;
+ upper_bound =
+ (vport_rl_type ==
+ QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
+ QM_INITIAL_VOQ_BYTE_CRD;
inc_val = QM_RL_INC_VAL(rate_limit);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
- DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
return -1;
}
qed_wr(p_hwfn, p_ptt,
QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
@@ -1013,7 +1136,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
__le32 *p_data, u32 addr, u32 len_in_dwords)
{
- struct qed_dmae_params params = {};
+ struct qed_dmae_params params = { 0 };
u32 *data_cpu;
int rc;
@@ -1066,16 +1189,16 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
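For readers new to SET_FIELD(): assuming qed's usual NAME##_MASK /
NAME##_SHIFT pairing (the exact macro body is outside this diff), the
conversion above is roughly equivalent to the open-coded form:

	reg_val &= ~(PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK <<
		     PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT);
	reg_val |= ((u32)vxlan_enable &
		    PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK) <<
		   PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;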
@@ -1099,18 +1222,20 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
+ eth_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
+ ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1148,22 +1273,23 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
- u8 shift;
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
+ eth_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
+ ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1179,16 +1305,16 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
/* Update DORQ registers */
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
ip_geneve_enable ? 1 : 0);
}
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,7 +1334,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
/* update PRS FIC register */
qed_wr(p_hwfn,
p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
} else {
/* clear VXLAN_NO_L2_ENABLE flag */
@@ -1229,7 +1355,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
- struct regpair ram_line = { };
+ struct regpair ram_line = { 0 };
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1621,6 +1747,8 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
storm_buf_size = GET_FIELD(hdr->data,
FW_OVERLAY_BUF_HDR_BUF_SIZE);
storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ if (storm_id >= NUM_STORMS)
+ break;
storm_mem_desc = allocated_mem + storm_id;
storm_mem_desc->size = storm_buf_size * sizeof(u32);
@@ -1645,7 +1773,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
/* If memory allocation has failed, free all allocated memory */
if (buf_offset < buf_size) {
- qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
return NULL;
}
@@ -1679,16 +1807,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
}
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem)
+ struct phys_mem_desc **fw_overlay_mem)
{
u8 storm_id;
- if (!fw_overlay_mem)
+ if (!fw_overlay_mem || !(*fw_overlay_mem))
return;
for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
struct phys_mem_desc *storm_mem_desc =
- (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ (struct phys_mem_desc *)*fw_overlay_mem + storm_id;
/* Free Storm's physical memory */
if (storm_mem_desc->virt_addr)
@@ -1699,5 +1827,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
}
/* Free allocated virtual memory */
- kfree(fw_overlay_mem);
+ kfree(*fw_overlay_mem);
+ *fw_overlay_mem = NULL;
}
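With the double-pointer signature, callers now pass the address of their
descriptor pointer and the helper clears it, so a repeated free becomes a
harmless no-op. A hedged usage sketch - the caller-side variable and argument
names are illustrative:

	struct phys_mem_desc *overlays;

	overlays = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays_buf, buf_size);
	/* ... use the per-storm overlay descriptors ... */
	qed_fw_overlay_mem_free(p_hwfn, &overlays);	/* frees and NULLs overlays */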
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 7e6c6389523b..b3bf9899c1a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -15,6 +15,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
@@ -46,30 +47,32 @@ static u32 pxp_global_win[] = {
/* IRO Array */
static const u32 iro_arr[] = {
0x00000000, 0x00000000, 0x00080000,
+ 0x00004478, 0x00000008, 0x00080000,
0x00003288, 0x00000088, 0x00880000,
- 0x000058e8, 0x00000020, 0x00200000,
+ 0x000058a8, 0x00000020, 0x00200000,
+ 0x00003188, 0x00000008, 0x00080000,
0x00000b00, 0x00000008, 0x00040000,
0x00000a80, 0x00000008, 0x00040000,
0x00000000, 0x00000008, 0x00020000,
0x00000080, 0x00000008, 0x00040000,
0x00000084, 0x00000008, 0x00020000,
- 0x00005718, 0x00000004, 0x00040000,
- 0x00004dd0, 0x00000000, 0x00780000,
+ 0x00005798, 0x00000004, 0x00040000,
+ 0x00004e50, 0x00000000, 0x00780000,
0x00003e40, 0x00000000, 0x00780000,
- 0x00004480, 0x00000000, 0x00780000,
+ 0x00004500, 0x00000000, 0x00780000,
0x00003210, 0x00000000, 0x00780000,
0x00003b50, 0x00000000, 0x00780000,
0x00007f58, 0x00000000, 0x00780000,
- 0x00005f58, 0x00000000, 0x00080000,
+ 0x00005fd8, 0x00000000, 0x00080000,
0x00007100, 0x00000000, 0x00080000,
- 0x0000aea0, 0x00000000, 0x00080000,
+ 0x0000af20, 0x00000000, 0x00080000,
0x00004398, 0x00000000, 0x00080000,
0x0000a5a0, 0x00000000, 0x00080000,
0x0000bde8, 0x00000000, 0x00080000,
0x00000020, 0x00000004, 0x00040000,
- 0x000056c8, 0x00000010, 0x00100000,
+ 0x00005688, 0x00000010, 0x00100000,
0x0000c210, 0x00000030, 0x00300000,
- 0x0000b088, 0x00000038, 0x00380000,
+ 0x0000b108, 0x00000038, 0x00380000,
0x00003d20, 0x00000080, 0x00400000,
0x0000bf60, 0x00000000, 0x00040000,
0x00004560, 0x00040080, 0x00040000,
@@ -77,11 +80,11 @@ static const u32 iro_arr[] = {
0x00003d60, 0x00000080, 0x00200000,
0x00008960, 0x00000040, 0x00300000,
0x0000e840, 0x00000060, 0x00600000,
- 0x00004618, 0x00000080, 0x00380000,
- 0x00010738, 0x000000c0, 0x00c00000,
+ 0x00004698, 0x00000080, 0x00380000,
+ 0x000107b8, 0x000000c0, 0x00c00000,
0x000001f8, 0x00000002, 0x00020000,
- 0x0000a2a0, 0x00000000, 0x01080000,
- 0x0000a3a8, 0x00000008, 0x00080000,
+ 0x0000a260, 0x00000000, 0x01080000,
+ 0x0000a368, 0x00000008, 0x00080000,
0x000001c0, 0x00000008, 0x00080000,
0x000001f8, 0x00000008, 0x00080000,
0x00000ac0, 0x00000008, 0x00080000,
@@ -90,39 +93,46 @@ static const u32 iro_arr[] = {
0x00000280, 0x00000008, 0x00080000,
0x00000680, 0x00080018, 0x00080000,
0x00000b78, 0x00080018, 0x00020000,
- 0x0000c640, 0x00000050, 0x003c0000,
- 0x00012038, 0x00000018, 0x00100000,
- 0x00011b00, 0x00000040, 0x00180000,
- 0x000095d0, 0x00000050, 0x00200000,
+ 0x0000c600, 0x00000058, 0x003c0000,
+ 0x00012038, 0x00000020, 0x00100000,
+ 0x00011b00, 0x00000048, 0x00180000,
+ 0x00009650, 0x00000050, 0x00200000,
0x00008b10, 0x00000040, 0x00280000,
- 0x00011640, 0x00000018, 0x00100000,
- 0x0000c828, 0x00000048, 0x00380000,
- 0x00011710, 0x00000020, 0x00200000,
- 0x00004650, 0x00000080, 0x00100000,
+ 0x000116c0, 0x00000018, 0x00100000,
+ 0x0000c808, 0x00000048, 0x00380000,
+ 0x00011790, 0x00000020, 0x00200000,
+ 0x000046d0, 0x00000080, 0x00100000,
0x00003618, 0x00000010, 0x00100000,
- 0x0000a968, 0x00000008, 0x00010000,
+ 0x0000a9e8, 0x00000008, 0x00010000,
0x000097a0, 0x00000008, 0x00010000,
- 0x00011990, 0x00000008, 0x00010000,
- 0x0000f018, 0x00000008, 0x00010000,
- 0x00012628, 0x00000008, 0x00010000,
- 0x00011da8, 0x00000008, 0x00010000,
- 0x0000aa78, 0x00000030, 0x00100000,
- 0x0000d768, 0x00000028, 0x00280000,
- 0x00009a58, 0x00000018, 0x00180000,
- 0x00009bd8, 0x00000008, 0x00080000,
- 0x00013a18, 0x00000008, 0x00080000,
- 0x000126e8, 0x00000018, 0x00180000,
- 0x0000e608, 0x00500288, 0x00100000,
- 0x00012970, 0x00000138, 0x00280000,
+ 0x00011a10, 0x00000008, 0x00010000,
+ 0x0000e9f8, 0x00000008, 0x00010000,
+ 0x00012648, 0x00000008, 0x00010000,
+ 0x000121c8, 0x00000008, 0x00010000,
+ 0x0000af08, 0x00000030, 0x00100000,
+ 0x0000d748, 0x00000028, 0x00280000,
+ 0x00009e68, 0x00000018, 0x00180000,
+ 0x00009fe8, 0x00000008, 0x00080000,
+ 0x00013ea8, 0x00000008, 0x00080000,
+ 0x00012f18, 0x00000018, 0x00180000,
+ 0x0000dfe8, 0x00500288, 0x00100000,
+ 0x000131a0, 0x00000138, 0x00280000,
};
void qed_init_iro_array(struct qed_dev *cdev)
{
- cdev->iro_arr = iro_arr;
+ cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}
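With the per-chip variants folded into one flat iro_arr, picking the active window at init time becomes a single pointer offset. A hedged sketch of the idea; only E4_IRO_ARR_OFFSET exists in this patch, and the second offset below is purely hypothetical:

    #include <stdio.h>

    #define E4_IRO_ARR_OFFSET 0   /* dwords into iro_arr, as in the patch */
    #define E5_IRO_ARR_OFFSET 129 /* hypothetical window for another chip */

    static const unsigned int iro_arr[256]; /* stand-in for the real table */

    int main(void)
    {
            int is_e4 = 1;
            const unsigned int *iro = iro_arr +
                    (is_e4 ? E4_IRO_ARR_OFFSET : E5_IRO_ARR_OFFSET);

            printf("IRO window starts at dword %td\n", iro - iro_arr);
            return 0;
    }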
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u!\n",
+ val, rt_offset);
+ return;
+ }
+
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
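The new range check turns an out-of-bounds store into a logged no-op instead of silent corruption of whatever follows the runtime arrays. A standalone sketch of the same check-then-store pattern; the array size and values are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    #define RT_ARRAY_SIZE 1024 /* illustrative, not the driver's value */

    static unsigned int init_val[RT_ARRAY_SIZE];
    static bool b_valid[RT_ARRAY_SIZE];

    static void store_rt_reg(unsigned int rt_offset, unsigned int val)
    {
            if (rt_offset >= RT_ARRAY_SIZE) { /* reject before writing */
                    fprintf(stderr, "refusing to store %u at index %u\n",
                            val, rt_offset);
                    return;
            }

            init_val[rt_offset] = val;
            b_valid[rt_offset] = true; /* mark the slot for the init run */
    }

    int main(void)
    {
            store_rt_reg(10, 0xdeadbeef);   /* accepted */
            store_rt_reg(RT_ARRAY_SIZE, 1); /* rejected and logged */
            return 0;
    }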
@@ -132,6 +142,14 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
{
size_t i;
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u!\n",
+ rt_offset,
+ (u32)(rt_offset + size - 1));
+ return;
+ }
+
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
@@ -175,7 +193,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
return rc;
/* invalidate after writing */
- for (j = i; j < i + segment; j++)
+ for (j = i; j < (u32)(i + segment); j++)
p_valid[j] = false;
/* Jump over the entire segment, including invalid entry */
@@ -245,7 +263,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr, u32 fill, u32 fill_count)
+ u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
struct qed_dmae_params params = {};
@@ -372,7 +390,7 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
case INIT_SRC_ZEROS:
data = le32_to_cpu(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
@@ -419,7 +437,6 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
-
val = qed_rd(p_hwfn, p_ptt, addr);
if (poll == INIT_POLL_NONE)
@@ -515,8 +532,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
INIT_IF_MODE_OP_CMD_OFFSET);
}
-static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
- struct init_if_phase_op *p_cmd,
+static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
@@ -563,7 +579,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
modes);
break;
case INIT_OP_IF_PHASE:
- cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ cmd_num += qed_init_cmd_phase(&cmd->if_phase,
phase, phase_id);
break;
case INIT_OP_DELAY:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index a573c8921982..12e5c4e370d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -12,23 +12,24 @@
#include "qed.h"
/**
- * @brief qed_init_iro_array - init iro_arr.
+ * qed_init_iro_array(): Init iro_arr.
*
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_iro_array(struct qed_dev *cdev);
/**
- * @brief qed_init_run - Run the init-sequence.
+ * qed_init_run(): Run the init-sequence.
*
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @phase: Phase.
+ * @phase_id: Phase ID.
+ * @modes: Mode.
*
- * @param p_hwfn
- * @param p_ptt
- * @param phase
- * @param phase_id
- * @param modes
- * @return _qed_status_t
+ * Return: _qed_status_t
*/
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
int modes);
/**
- * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ * qed_init_alloc(): Allocate RT array, store 'values' ptrs.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
- *
- * @return _qed_status_t
+ * Return: _qed_status_t.
*/
int qed_init_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_hwfn_deallocate
+ * qed_init_free(): Init HW function deallocate.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ * qed_init_store_rt_reg(): Store a configuration value in the RT array.
*
+ * @p_hwfn: HW device data.
+ * @rt_offset: RT offset.
+ * @val: Val.
*
- * @param p_hwfn
- * @param rt_offset
- * @param val
+ * Return: Void.
*/
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
@@ -72,29 +74,21 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
#define OVERWRITE_RT_REG(hwfn, offset, val) \
qed_init_store_rt_reg(hwfn, offset, val)
-/**
- * @brief
- *
- *
- * @param p_hwfn
- * @param rt_offset
- * @param val
- * @param size
- */
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 *val,
size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
- qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))
/**
- * @brief
- * Initialize GTT global windows and set admin window
- * related params of GTT/PTT to default values.
+ * qed_gtt_init(): Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f78e6055f654..a97f691839e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -36,7 +36,7 @@ struct qed_sb_sp_info {
struct qed_sb_info sb_info;
/* per protocol index data */
- struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
enum qed_attention_type {
@@ -1507,7 +1507,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
else
SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
- sb_offset = igu_sb_id * PIS_PER_SB_E4;
+ sb_offset = igu_sb_id * PIS_PER_SB;
pi_offset = sb_offset + pi_index;
if (p_hwfn->hw_init_done)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c5550e96bbe1..84c17e97f569 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -53,51 +53,54 @@ enum qed_coalescing_fsm {
};
/**
- * @brief qed_int_igu_enable_int - enable device interrupts
+ * qed_int_igu_enable_int(): Enable device interrupts.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode - interrupt mode to use
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode to use.
+ *
+ * Return: Void.
*/
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief qed_int_igu_disable_int - disable device interrupts
+ * qed_int_igu_disable_int(): Disable device interrupts.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
- * register from igu.
+ * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc
+ * register from igu.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u64
+ * Return: u64.
*/
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
#define QED_SP_SB_ID 0xffff
/**
- * @brief qed_int_sb_init - Initializes the sb_info structure.
+ * qed_int_sb_init(): Initializes the sb_info structure.
*
- * once the structure is initialized it can be passed to sb related functions.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Points to an uninitialized (but allocated) sb_info structure.
+ * @sb_virt_addr: SB Virtual address.
+ * @sb_phy_addr: SB Physical address.
+ * @sb_id: The sb_id to be used (zero based in driver);
+ * should use QED_SP_SB_ID for SP Status block.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info points to an uninitialized (but
- * allocated) sb_info structure
- * @param sb_virt_addr
- * @param sb_phy_addr
- * @param sb_id the sb_id to be used (zero based in driver)
- * should use QED_SP_SB_ID for SP Status block
+ * Return: Int.
*
- * @return int
+ * Once the structure is initialized it can be passed to sb related functions.
*/
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
dma_addr_t sb_phy_addr,
u16 sb_id);
/**
- * @brief qed_int_sb_setup - Setup the sb.
+ * qed_int_sb_setup(): Setup the sb.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Initialized sb_info structure.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info initialized sb_info structure
+ * Return: Void.
*/
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info);
/**
- * @brief qed_int_sb_release - releases the sb_info structure.
+ * qed_int_sb_release(): Releases the sb_info structure.
*
- * once the structure is released, it's memory can be freed
+ * @p_hwfn: HW device data.
+ * @sb_info: Points to an allocated sb_info structure.
+ * @sb_id: The sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block).
*
- * @param p_hwfn
- * @param sb_info points to an allocated sb_info structure
- * @param sb_id the sb_id to be used (zero based in driver)
- * should never be equal to QED_SP_SB_ID
- * (SP Status block)
+ * Return: Int.
*
- * @return int
+ * Once the structure is released, its memory can be freed.
*/
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info,
u16 sb_id);
/**
- * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
- * default status block.
+ * qed_int_sp_dpc(): To be called when an interrupt is received on the
+ * default status block.
*
- * @param p_hwfn - pointer to hwfn
+ * @t: Tasklet.
+ *
+ * Return: Void.
*
*/
void qed_int_sp_dpc(struct tasklet_struct *t);
/**
- * @brief qed_int_get_num_sbs - get the number of status
- * blocks configured for this funciton in the igu.
+ * qed_int_get_num_sbs(): Get the number of status blocks configured
+ * for this function in the igu.
*
- * @param p_hwfn
- * @param p_sb_cnt_info
+ * @p_hwfn: HW device data.
+ * @p_sb_cnt_info: Pointer to SB count info.
*
- * @return int - number of status blocks configured
+ * Return: Void.
*/
void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
struct qed_sb_cnt_info *p_sb_cnt_info);
/**
- * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * qed_int_disable_post_isr_release(): Performs the cleanup post ISR
* release. The API needs to be called after releasing all slowpath IRQs
* of the device.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
+ * Return: Void.
*/
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
- * @brief qed_int_attn_clr_enable - sets whether the general behavior is
+ * qed_int_attn_clr_enable(): Sets whether the general behavior is
* preventing attentions from being reasserted, or following the
* attributes of the specific attention.
*
- * @param cdev
- * @param clr_enable
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
+ *
+ * Return: Void.
*
*/
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief - Doorbell Recovery handler.
+ * qed_db_rec_handler(): Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -192,7 +204,7 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
@@ -223,30 +235,34 @@ struct qed_igu_info {
};
/**
- * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources
+ * provided by MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into
+ * an IGU sb-id.
*
- * @param p_hwfn
- * @param sb_id - user provided sb_id
+ * @p_hwfn: HW device data.
+ * @sb_id: User provided sb_id.
*
- * @return an index inside IGU CAM where the SB resides
+ * Return: An index inside IGU CAM where the SB resides.
*/
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
- * @brief return a pointer to an unused valid SB
+ * qed_get_igu_free_sb(): Return a pointer to an unused valid SB.
*
- * @param p_hwfn
- * @param b_is_pf - true iff we want a SB belonging to a PF
+ * @p_hwfn: HW device data.
+ * @b_is_pf: True iff we want a SB belonging to a PF.
*
- * @return point to an igu_block, NULL if none is available
+ * Return: Pointer to an igu_block, NULL if none is available.
*/
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
bool b_is_pf);
@@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * qed_int_igu_read_cam(): Reads the IGU CAM.
* This function needs to be called during hardware
* prepare. It reads the info from igu cam to know which
* status block is the default / base status block etc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
void *cookie);
/**
- * @brief qed_int_register_cb - Register callback func for
- * slowhwfn statusblock.
- *
- * Every protocol that uses the slowhwfn status block
- * should register a callback function that will be called
- * once there is an update of the sp status block.
- *
- * @param p_hwfn
- * @param comp_cb - function to be called when there is an
- * interrupt on the sp sb
- *
- * @param cookie - passed to the callback function
- * @param sb_idx - OUT parameter which gives the chosen index
- * for this protocol.
- * @param p_fw_cons - pointer to the actual address of the
- * consumer for this protocol.
- *
- * @return int
+ * qed_int_register_cb(): Register callback func for slowhwfn statusblock.
+ *
+ * @p_hwfn: HW device data.
+ * @comp_cb: Function to be called when there is an
+ * interrupt on the sp sb.
+ * @cookie: Passed to the callback function.
+ * @sb_idx: (OUT) parameter which gives the chosen index
+ * for this protocol.
+ * @p_fw_cons: Pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * Return: Int.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
*/
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
@@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
__le16 **p_fw_cons);
/**
- * @brief qed_int_unregister_cb - Unregisters callback
- * function from sp sb.
- * Partner of qed_int_register_cb -> should be called
- * when no longer required.
+ * qed_int_unregister_cb(): Unregisters callback function from sp sb.
+ *
+ * @p_hwfn: HW device data.
+ * @pi: Producer Index.
*
- * @param p_hwfn
- * @param pi
+ * Return: Int.
*
- * @return int
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
*/
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
u8 pi);
/**
- * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ * qed_int_get_sp_sb_id(): Get the slowhwfn sb id.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u16
+ * Return: u16.
*/
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param igu_sb_id - igu status block id
- * @param opaque - opaque fid of the sb owner.
- * @param b_set - set(1) / clear(0)
+ * qed_int_igu_init_pure_rt_single(): Status block cleanup.
+ * Should be called for each status
+ * block that will be used -> both PF / VF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @igu_sb_id: IGU status block id.
+ * @opaque: Opaque fid of the sb owner.
+ * @b_set: Set(1) / Clear(0).
+ *
+ * Return: Void.
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
bool b_set);
/**
- * @brief qed_int_cau_conf - configure cau for a given status
- * block
- *
- * @param p_hwfn
- * @param ptt
- * @param sb_phys
- * @param igu_sb_id
- * @param vf_number
- * @param vf_valid
+ * qed_int_cau_conf_sb(): Configure cau for a given status block.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_phys: SB physical address.
+ * @igu_sb_id: IGU status block id.
+ * @vf_number: VF number.
+ * @vf_valid: VF valid or not.
+ *
+ * Return: Void.
*/
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
u8 vf_valid);
/**
- * @brief qed_int_alloc
+ * qed_int_alloc(): QED interrupt alloc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_free
+ * qed_int_free(): QED interrupt free.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_int_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_setup
+ * qed_int_setup(): QED interrupt setup.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Enable Interrupt & Attention for hw function
+ * qed_int_igu_enable(): Enable Interrupt & Attention for hw function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief - Initialize CAU status block entry
+ * qed_init_cau_sb_entry(): Initialize CAU status block entry.
+ *
+ * @p_hwfn: HW device data.
+ * @p_sb_entry: Pointer to SB entry.
+ * @pf_id: PF number.
+ * @vf_number: VF number.
+ * @vf_valid: VF valid or not.
*
- * @param p_hwfn
- * @param p_sb_entry
- * @param pf_id
- * @param vf_number
- * @param vf_valid
+ * Return: Void.
*/
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
new file mode 100644
index 000000000000..3ccdd3b1d8cb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_IRO_HSI_H
+#define _QED_IRO_HSI_H
+
+#include <linux/types.h>
+
+enum {
+ IRO_YSTORM_FLOW_CONTROL_MODE_GTT,
+ IRO_PSTORM_PKT_DUPLICATION_CFG,
+ IRO_TSTORM_PORT_STAT,
+ IRO_TSTORM_LL2_PORT_STAT,
+ IRO_TSTORM_PKT_DUPLICATION_CFG,
+ IRO_USTORM_VF_PF_CHANNEL_READY_GTT,
+ IRO_USTORM_FLR_FINAL_ACK_GTT,
+ IRO_USTORM_EQE_CONS_GTT,
+ IRO_USTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_USTORM_COMMON_QUEUE_CONS_GTT,
+ IRO_XSTORM_PQ_INFO,
+ IRO_XSTORM_INTEG_TEST_DATA,
+ IRO_YSTORM_INTEG_TEST_DATA,
+ IRO_PSTORM_INTEG_TEST_DATA,
+ IRO_TSTORM_INTEG_TEST_DATA,
+ IRO_MSTORM_INTEG_TEST_DATA,
+ IRO_USTORM_INTEG_TEST_DATA,
+ IRO_XSTORM_OVERLAY_BUF_ADDR,
+ IRO_YSTORM_OVERLAY_BUF_ADDR,
+ IRO_PSTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_OVERLAY_BUF_ADDR,
+ IRO_MSTORM_OVERLAY_BUF_ADDR,
+ IRO_USTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_LL2_RX_PRODS_GTT,
+ IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_USTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT,
+ IRO_MSTORM_QUEUE_STAT,
+ IRO_MSTORM_TPA_TIMEOUT_US,
+ IRO_MSTORM_ETH_VF_PRODS,
+ IRO_MSTORM_ETH_PF_PRODS_GTT,
+ IRO_MSTORM_ETH_PF_STAT,
+ IRO_USTORM_QUEUE_STAT,
+ IRO_USTORM_ETH_PF_STAT,
+ IRO_PSTORM_QUEUE_STAT,
+ IRO_PSTORM_ETH_PF_STAT,
+ IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT,
+ IRO_TSTORM_ETH_PRS_INPUT,
+ IRO_ETH_RX_RATE_LIMIT,
+ IRO_TSTORM_ETH_RSS_UPDATE_GTT,
+ IRO_XSTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_YSTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_GRQ_PROD,
+ IRO_TSTORM_SCSI_CMDQ_CONS_GTT,
+ IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_TSTORM_ISCSI_RX_STATS,
+ IRO_MSTORM_ISCSI_RX_STATS,
+ IRO_USTORM_ISCSI_RX_STATS,
+ IRO_XSTORM_ISCSI_TX_STATS,
+ IRO_YSTORM_ISCSI_TX_STATS,
+ IRO_PSTORM_ISCSI_TX_STATS,
+ IRO_TSTORM_FCOE_RX_STATS,
+ IRO_PSTORM_FCOE_TX_STATS,
+ IRO_PSTORM_RDMA_QUEUE_STAT,
+ IRO_TSTORM_RDMA_QUEUE_STAT,
+ IRO_XSTORM_RDMA_ASSERT_LEVEL,
+ IRO_YSTORM_RDMA_ASSERT_LEVEL,
+ IRO_PSTORM_RDMA_ASSERT_LEVEL,
+ IRO_TSTORM_RDMA_ASSERT_LEVEL,
+ IRO_MSTORM_RDMA_ASSERT_LEVEL,
+ IRO_USTORM_RDMA_ASSERT_LEVEL,
+ IRO_XSTORM_IWARP_RXMIT_STATS,
+ IRO_TSTORM_ROCE_EVENTS_STAT,
+ IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS,
+ IRO_YSTORM_ROCE_ERROR_STATS,
+ IRO_PSTORM_ROCE_DCQCN_SENT_STATS,
+ IRO_USTORM_ROCE_CQE_STATS,
+};
+
+/* Pstorm LL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \
+ + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm LL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size)
+
+/* Ustorm LL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[IRO_ETH_RX_RATE_LIMIT].base \
+ + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \
+ (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \
+ + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1))
+#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone
+ * size mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[IRO_MSTORM_ETH_VF_PRODS].base \
+ + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \
+ + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size)
+
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_MSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[IRO_MSTORM_QUEUE_STAT].size)
+
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \
+ + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size)
+
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_FCOE_TX_STATS].base \
+ + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size)
+
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg
+ * data type.
+ */
+#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1))
+#define PSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_PSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size)
+
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size)
+
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \
+ + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1))
+#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size)
+
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_FCOE_RX_STATS].base \
+ + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size)
+
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size)
+
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_LL2_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size)
+
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \
+ + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1))
+#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base)
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Tstorm LL2 packet duplication configuration.
+ * Use tstorm_pkt_dup_cfg data type.
+ */
+#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1))
+#define TSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size)
+
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \
+ + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \
+ + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1))
+#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1))
+#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_EQE_CONS_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1))
+#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1))
+#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size)
+
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_USTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_USTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size)
+
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size)
+
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_USTORM_ROCE_CQE_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_USTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_USTORM_TOE_CQ_PROD].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_TOE_GRQ_PROD].base \
+ + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size)
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \
+ + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1))
+#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \
+ (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size)
+
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \
+ + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[IRO_XSTORM_PQ_INFO].base \
+ + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size)
+
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base)
+#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size)
+
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size)
+
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_YSTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size)
+
+/* Per-chip offsets in iro_arr in dwords */
+#define E4_IRO_ARR_OFFSET 0
+#endif
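Every *_OFFSET macro in this header is linear addressing into storm RAM: a per-symbol base plus a per-instance stride (m1, plus m2 for two-dimensional producers such as the BDQ entries). A compilable sketch of the computation, with made-up table values:

    #include <stdio.h>

    struct iro {
            unsigned int base; /* RAM offset of instance 0 */
            unsigned int m1;   /* stride of the first index */
            unsigned int m2;   /* stride of the second index, if any */
            unsigned int size; /* bytes per instance */
    };

    /* Illustrative values only; the real table is firmware-generated. */
    static const struct iro demo = { .base = 0x4478, .m1 = 0x8, .size = 0x8 };

    #define DEMO_OFFSET(pf_id) (demo.base + (pf_id) * demo.m1)

    int main(void)
    {
            printf("pf 0 -> 0x%x, pf 3 -> 0x%x, %u bytes each\n",
                   DEMO_OFFSET(0), DEMO_OFFSET(3), demo.size);
            return 0;
    }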
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index db926d8b3033..511ab214eb9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -29,6 +29,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
@@ -627,10 +628,9 @@ static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -642,10 +642,9 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
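GET_GTT_BDQ_REG_ADDR is introduced elsewhere in the series; a plausible reconstruction, consistent with the open-coded arithmetic it replaces here, would fold the GTT window base together with the generated <NAME>_GTT_OFFSET() helper from qed_iro_hsi.h. This is an assumption about its shape, not the actual definition:

    /* Hypothetical sketch of the helper macros (not from this patch). */
    #define GET_GTT_REG_ADDR(base, name, ...) \
            ((base) + name##_GTT_OFFSET(__VA_ARGS__))
    #define GET_GTT_BDQ_REG_ADDR(base, name, ...) \
            GET_GTT_REG_ADDR(base, name, __VA_ARGS__)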
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index dab7a5d09f87..dec2b00259d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
void qed_iscsi_free(struct qed_hwfn *p_hwfn);
/**
- * @brief - Fills provided statistics struct with statistics.
+ * qed_get_protocol_stats_iscsi(): Fills provided statistics
+ * struct with statistics.
*
- * @param cdev
- * @param stats - points to struct that will be filled with statistics.
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ *
+ * Return: Void.
*/
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 186d0048a9d1..1d1d4caad680 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -114,6 +114,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
+ p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index dfaf10edfabf..2edd6bf64a3c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -28,6 +28,7 @@
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
@@ -37,7 +38,6 @@
#include "qed_sp.h"
#include "qed_sriov.h"
-
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
@@ -904,9 +904,10 @@ qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
{
u32 init_prod_val = 0;
- *pp_prod = p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
+ *pp_prod = (u8 __iomem *)
+ p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
@@ -1111,7 +1112,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
{
int rc;
-
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size,
qed_get_cm_pq_idx_mcos(p_hwfn, tc));
@@ -2010,7 +2010,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
struct qed_ntuple_filter_params *p_params)
{
- struct rx_update_gft_filter_data *p_ramrod = NULL;
+ struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0;
@@ -2031,7 +2031,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
}
rc = qed_sp_init_request(p_hwfn, &p_ent,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -2100,7 +2100,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2134,7 +2134,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2763,25 +2763,6 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
-static int qed_configure_filter(struct qed_dev *cdev,
- struct qed_filter_params *params)
-{
- enum qed_filter_rx_mode_type accept_flags;
-
- switch (params->type) {
- case QED_FILTER_TYPE_UCAST:
- return qed_configure_filter_ucast(cdev, &params->filter.ucast);
- case QED_FILTER_TYPE_MCAST:
- return qed_configure_filter_mcast(cdev, &params->filter.mcast);
- case QED_FILTER_TYPE_RX_MODE:
- accept_flags = params->filter.accept_flags;
- return qed_configure_filter_rx_mode(cdev, accept_flags);
- default:
- DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
- return -EINVAL;
- }
-}
-
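With the type switch removed, callers reach the specific qed_eth_ops callback directly. A hedged sketch of a post-split call site; the wrapper below is illustrative and not part of the patch:

    /* Before: ops->filter_config(cdev, &params), demultiplexed on
     * params->type. After: one callback per filter class, no switch.
     */
    static int demo_set_rx_mode(const struct qed_eth_ops *ops,
                                struct qed_dev *cdev,
                                enum qed_filter_rx_mode_type mode)
    {
            return ops->filter_config_rx_mode(cdev, mode);
    }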
static int qed_configure_arfs_searcher(struct qed_dev *cdev,
enum qed_filter_config_mode mode)
{
@@ -2867,7 +2848,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
-static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac)
{
int i, ret;
@@ -2904,7 +2885,9 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.q_rx_stop = &qed_stop_rxq,
.q_tx_start = &qed_start_txq,
.q_tx_stop = &qed_stop_txq,
- .filter_config = &qed_configure_filter,
+ .filter_config_rx_mode = &qed_configure_filter_rx_mode,
+ .filter_config_ucast = &qed_configure_filter_ucast,
+ .filter_config_mcast = &qed_configure_filter_mcast,
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8eceeebb1a7b..a538cf478c14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -92,18 +92,18 @@ struct qed_filter_mcast {
};
/**
- * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
*
- * @param p_hwfn
- * @param p_rxq Handler of queue to close
- * @param eq_completion_only If True completion will be on
- * EQe, if False completion will be
- * on EQe if p_hwfn opaque
- * different from the RXQ opaque
- * otherwise on CQe.
- * @param cqe_completion If True completion will be
- * receive on CQe.
- * @return int
+ * @p_hwfn: HW device data.
+ * @p_rxq: Handler of queue to close.
+ * @eq_completion_only: If True, completion will be on
+ * EQe; if False, completion will be
+ * on EQe if the p_hwfn opaque is
+ * different from the RXQ opaque,
+ * otherwise on CQe.
+ * @cqe_completion: If True, completion will be received on CQe.
+ *
+ * Return: Int.
*/
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
@@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
bool eq_completion_only, bool cqe_completion);
/**
- * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ * qed_eth_tx_queue_stop(): Closes a Tx queue.
*
- * @param p_hwfn
- * @param p_txq - handle to Tx queue needed to be closed
+ * @p_hwfn: HW device data.
+ * @p_txq: Handle to Tx queue needed to be closed.
*
- * @return int
+ * Return: Int.
*/
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
@@ -146,7 +146,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
-
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
@@ -205,16 +204,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_vport_stop -
- *
- * This ramrod closes a VPort after all its RX and TX queues are terminated.
- * An Assert is generated if any queues are left open.
+ * qed_sp_vport_stop(): This ramrod closes a VPort after all its
+ * RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
*
- * @param p_hwfn
- * @param opaque_fid
- * @param vport_id VPort ID
+ * @p_hwfn: HW device data.
+ * @opaque_fid: Opaque FID.
+ * @vport_id: VPort ID.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
@@ -225,22 +223,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_rx_eth_queues_update -
+ * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue.
+ * It is used for setting the active state
+ * of the queue and updating the TPA and
+ * SGE parameters.
+ *
+ * @p_hwfn: HW device data.
+ * @pp_rxq_handlers: An array of queue handlers to be updated.
+ * @num_rxqs: Number of queues to update.
+ * @complete_cqe_flg: Post completion to the CQE Ring if set.
+ * @complete_event_flg: Post completion to the Event Ring if set.
+ * @comp_mode: Comp mode.
+ * @p_comp_data: Pointer to comp data.
*
- * This ramrod updates an RX queue. It is used for setting the active state
- * of the queue and updating the TPA and SGE parameters.
+ * Return: Int.
*
- * @note At the moment - only used by non-linux VFs.
- *
- * @param p_hwfn
- * @param pp_rxq_handlers An array of queue handlers to be updated.
- * @param num_rxqs number of queues to update.
- * @param complete_cqe_flg Post completion to the CQE Ring if set
- * @param complete_event_flg Post completion to the Event Ring if set
- * @param comp_mode
- * @param p_comp_data
- *
- * @return int
+ * Note: At the moment - only used by non-Linux VFs.
*/
int
@@ -257,30 +254,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
/**
- * *@brief qed_arfs_mode_configure -
- *
- **Enable or disable rfs mode. It must accept atleast one of tcp or udp true
- **and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ * qed_arfs_mode_configure(): Enable or disable rfs mode.
+ * It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable
+ * rfs mode.
*
- **@param p_hwfn
- **@param p_ptt
- **@param p_cfg_params - arfs mode configuration parameters.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_cfg_params: ARFS mode configuration parameters.
*
+ * Return: Void.
*/
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params);
/**
- * @brief - qed_configure_rfs_ntuple_filter
+ * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
+ * or remove an ARFS HW filter.
*
- * This ramrod should be used to add or remove arfs hw filter
+ * @p_hwfn: HW device data.
+ * @p_cb: Used for QED_SPQ_MODE_CB, where client would initialize
+ * it with cookie and callback function address, if not
+ * using this mode then client must pass NULL.
+ * @p_params: Pointer to params.
*
- * @params p_hwfn
- * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
- * it with cookie and callback function address, if not
- * using this mode then client must pass NULL.
- * @params p_params
+ * Return: Int.
*/
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
@@ -374,16 +373,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue, when queue_cid is already prepared
+ * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is
+ * already prepared.
*
- * @param p_hwfn
- * @param p_cid
- * @param bd_max_bytes
- * @param bd_chain_phys_addr
- * @param cqe_pbl_addr
- * @param cqe_pbl_size
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer to CID.
+ * @bd_max_bytes: Max bytes.
+ * @bd_chain_phys_addr: Chain physical address.
+ * @cqe_pbl_addr: PBL address.
+ * @cqe_pbl_size: PBL size.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
@@ -393,15 +393,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue, where queue_cid is already prepared
+ * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is
+ * already prepared.
*
- * @param p_hwfn
- * @param p_cid
- * @param pbl_addr
- * @param pbl_size
- * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer to CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL size.
+ * @pq_id: PQ ID to use for this Tx queue.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c46a7f756ed5..ed274f033626 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -28,6 +28,7 @@
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
@@ -43,6 +44,8 @@
#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
+#define QED_LL2_INVALID_STATS_ID 0xff
+
struct qed_cb_ll2_info {
int rx_cnt;
u32 rx_size;
@@ -62,6 +65,29 @@ struct qed_ll2_buffer {
dma_addr_t phys_addr;
};
+static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
+ u8 ll2_queue_type, u8 qid)
+{
+ u8 stats_id;
+
+ /* For legacy (RAM based) queues, the stats_id will be set as the
+ * queue_id. Otherwise (context based queue), it will be set to
+ * the "abs_pf_id" offset from the end of the RAM based queue IDs.
+ * If the final value exceeds the total counters amount, return
+ * INVALID value to indicate that the stats for this connection should
+ * be disabled.
+ */
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ stats_id = qid;
+ else
+ stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
+
+ if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
+ return stats_id;
+ else
+ return QED_LL2_INVALID_STATS_ID;
+}
+
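A hedged sketch of how an acquire path might consume the new helper, dropping back to disabled statistics when the counter pool is exhausted; apart from the helper and QED_LL2_INVALID_STATS_ID, the names here are illustrative:

    /* Illustrative caller: resolve a stats counter for a new connection. */
    u8 stats_id = qed_ll2_handle_to_stats_id(p_hwfn, rx_conn_type, qid);
    bool b_stats_en = stats_id != QED_LL2_INVALID_STATS_ID;

    if (!b_stats_en)
            DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                       "no stats counter for qid %u, stats disabled\n", qid);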
static void qed_ll2b_complete_tx_packet(void *cxt,
u8 connection_handle,
void *cookie,
@@ -106,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
- struct qed_ll2_buffer *buffer)
+ struct qed_ll2_buffer *buffer)
{
spin_lock_bh(&cdev->ll2->lock);
@@ -352,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
num_bds_in_packet = p_pkt->bd_used;
list_del(&p_pkt->list_entry);
- if (num_bds < num_bds_in_packet) {
+ if (unlikely(num_bds < num_bds_in_packet)) {
DP_NOTICE(p_hwfn,
"Rest of BDs does not cover whole packet\n");
goto out;
@@ -462,7 +488,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if (!p_pkt) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"[%d] LL2 Rx completion but active_descq is empty\n",
p_ll2_conn->input.conn_type);
@@ -475,7 +501,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
else
qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
- if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
@@ -597,18 +623,18 @@ static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
struct core_rx_slow_path_cqe *p_cqe)
{
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u32 cid;
if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
return false;
- iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
- if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+ ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
+ if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
return false;
/* Need to make a flush */
- cid = le32_to_cpu(iscsi_ooo->cid);
+ cid = le32_to_cpu(ooo_opq->cid);
qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
return true;
@@ -624,7 +650,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
struct qed_ooo_buffer *p_buffer;
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u8 placement_offset = 0;
u8 cqe_type;
@@ -645,7 +671,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
&cqe->rx_cqe_sp))
continue;
- if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+ if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
cqe_type);
@@ -657,22 +683,21 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
packet_length = le16_to_cpu(p_cqe_fp->packet_length);
vlan = le16_to_cpu(p_cqe_fp->vlan);
- iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
- qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
- iscsi_ooo);
- cid = le32_to_cpu(iscsi_ooo->cid);
+ ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+ qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
+ cid = le32_to_cpu(ooo_opq->cid);
/* Process delete isle first */
- if (iscsi_ooo->drop_size)
+ if (ooo_opq->drop_size)
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
- iscsi_ooo->drop_isle,
- iscsi_ooo->drop_size);
+ ooo_opq->drop_isle,
+ ooo_opq->drop_size);
- if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+ if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
continue;
/* Now process create/add/join isles */
- if (list_empty(&p_rx->active_descq)) {
+ if (unlikely(list_empty(&p_rx->active_descq))) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX chain has no submitted buffers\n"
);
@@ -682,12 +707,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
- if (!p_pkt) {
+ if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
+ ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX packet is not valid\n");
return -EIO;
@@ -701,19 +726,19 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_chain_consume(&p_rx->rxq_chain);
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
- switch (iscsi_ooo->ooo_opcode) {
+ switch (ooo_opq->ooo_opcode) {
case TCP_EVENT_ADD_NEW_ISLE:
qed_ooo_add_new_isle(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer);
break;
case TCP_EVENT_ADD_ISLE_RIGHT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_RIGHT_BUF);
break;
@@ -721,7 +746,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_LEFT_BUF);
break;
@@ -729,13 +754,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle +
- 1,
+ ooo_opq->ooo_isle + 1,
p_buffer,
QED_OOO_LEFT_BUF);
qed_ooo_join_isles(p_hwfn,
p_hwfn->p_ooo_info,
- cid, iscsi_ooo->ooo_isle);
+ cid, ooo_opq->ooo_isle);
break;
case TCP_EVENT_ADD_PEN:
num_ooo_add_to_peninsula++;
@@ -747,7 +771,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
} else {
DP_NOTICE(p_hwfn,
"Unexpected event (%d) TX OOO completion\n",
- iscsi_ooo->ooo_opcode);
+ ooo_opq->ooo_opcode);
}
}
@@ -859,16 +883,16 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return 0;
- if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
return 0;
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
- if (!num_bds)
+ if (unlikely(!num_bds))
return 0;
while (num_bds) {
@@ -877,10 +901,10 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
- if (!p_pkt)
+ if (unlikely(!p_pkt))
return -EINVAL;
- if (p_pkt->bd_used != 1) {
+ if (unlikely(p_pkt->bd_used != 1)) {
DP_NOTICE(p_hwfn,
"Unexpectedly many BDs(%d) in TX OOO completion\n",
p_pkt->bd_used);
@@ -1008,7 +1032,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+ if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1124,6 +1148,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+
qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
/* Get SPQ entry */
@@ -1533,7 +1558,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
- struct e4_core_conn_context *p_cxt;
+ struct core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_info *p_ll2_conn;
struct qed_hwfn *p_hwfn = cxt;
@@ -1544,7 +1569,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
int rc = -EINVAL;
u32 i, capacity;
size_t desc_size;
- u8 qid;
+ u8 qid, stats_id;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
@@ -1610,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
p_ll2_conn->input.rx_conn_type);
+ stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
+ p_ll2_conn->input.rx_conn_type,
+ qid);
p_ll2_conn->queue_id = qid;
- p_ll2_conn->tx_stats_id = qid;
+ p_ll2_conn->tx_stats_id = stats_id;
- DP_VERBOSE(p_hwfn, QED_MSG_LL2,
- "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
- p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+ /* If there is no valid stats id for this connection, disable stats */
+ if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
+ p_ll2_conn->tx_stats_en = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Disabling stats for queue %d - not enough counters\n",
+ qid);
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
+ p_hwfn->rel_pf_id,
+ p_ll2_conn->input.rx_conn_type, qid, stats_id);
if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
- p_rx->set_prod_addr = p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_rx->set_prod_addr =
+ (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_LL2_RX_PRODS, qid);
} else {
/* QED_LL2_RX_TYPE_CTX - using doorbell */
p_rx->ctx_based = 1;
@@ -1762,7 +1803,7 @@ int qed_ll2_post_rx_buffer(void *cxt,
}
}
- /* If we're lacking entires, let's try to flush buffers to FW */
+ /* If we're lacking entries, let's try to flush buffers to FW */
if (!p_curp || !p_curb) {
rc = -EBUSY;
p_curp = NULL;
@@ -1842,8 +1883,8 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
}
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
- p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
+ if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
} else {
@@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt,
int rc = 0;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
+ if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
- if (p_tx->cur_send_packet) {
+ if (unlikely(p_tx->cur_send_packet)) {
rc = -EEXIST;
goto out;
}
/* Get entry, but only if we have tx elements for it */
- if (!list_empty(&p_tx->free_descq))
+ if (unlikely(!list_empty(&p_tx->free_descq)))
p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry);
- if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
+ if (unlikely(p_curp &&
+ qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
p_curp = NULL;
- if (!p_curp) {
+ if (unlikely(!p_curp)) {
rc = -EBUSY;
goto out;
}
@@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt,
unsigned long flags;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
- if (!p_ll2_conn->tx_queue.cur_send_packet)
+ if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
return -EINVAL;
p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
- if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
return -EINVAL;
/* Fill the BD information, and possibly notify FW */
@@ -2609,7 +2651,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
DP_NOTICE(cdev, "Failed to add an LLH filter\n");
goto err3;
}
-
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2651,7 +2692,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1 + nr_frags);
return -EINVAL;
@@ -2693,7 +2734,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
&pkt, 1);
- if (rc)
+ if (unlikely(rc))
goto err;
for (i = 0; i < nr_frags; i++) {
@@ -2717,7 +2758,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
/* if failed not much to do here, partial packet has been posted
* we can't free memory, will need to wait for completion
*/
- if (rc)
+ if (unlikely(rc))
goto err2;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index df88d00053a2..0bfc375161ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -32,7 +32,6 @@
#define QED_LL2_LEGACY_CONN_BASE_PF 0
#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
-
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@@ -119,41 +118,41 @@ struct qed_ll2_info {
extern const struct qed_ll2_ops qed_ll2_ops_pass;
/**
- * @brief qed_ll2_acquire_connection - allocate resources,
- * starts rx & tx (if relevant) queues pair. Provides
- * connecion handler as output parameter.
+ * qed_ll2_acquire_connection(): Allocates resources,
+ * starts the Rx & Tx (if relevant) queue pair.
+ * Provides the connection handler as an output
+ * parameter.
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @data: Describes connection parameters.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param data - describes connection parameters
- * @return int
+ * Return: Int.
*/
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
/**
- * @brief qed_ll2_establish_connection - start previously
- * allocated LL2 queues pair
+ * qed_ll2_establish_connection(): Start a previously allocated LL2 queue pair.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param p_ptt
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
+ * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param addr rx (physical address) buffers to submit
- * @param cookie
- * @param notify_fw produce corresponding Rx BD immediately
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ * @addr: Rx buffer (physical address) to submit.
+ * @buf_len: Buffer length.
+ * @cookie: Cookie.
+ * @notify_fw: Produce corresponding Rx BD immediately.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
@@ -161,15 +160,15 @@ int qed_ll2_post_rx_buffer(void *cxt,
u16 buf_len, void *cookie, u8 notify_fw);
/**
- * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
- * to prepare Tx packet submission to FW.
+ * qed_ll2_prepare_tx_packet(): Request the start Tx BD
+ * to prepare a Tx packet submission to the FW.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle
- * @param pkt - info regarding the tx packet
- * @param notify_fw - issue doorbell to fw for this packet
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: Connection handle.
+ * @pkt: Info regarding the tx packet.
+ * @notify_fw: Issue doorbell to fw for this packet.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
@@ -177,81 +176,83 @@ int qed_ll2_prepare_tx_packet(void *cxt,
bool notify_fw);
/**
- * @brief qed_ll2_release_connection - releases resources
- * allocated for LL2 connection
+ * qed_ll2_release_connection(): Releases resources allocated for LL2
+ * connection.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * Return: Void.
*/
void qed_ll2_release_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
- * Tx BD of BDs requested by
- * qed_ll2_prepare_tx_packet
+ * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill the
+ * Tx BDs requested by
+ * qed_ll2_prepare_tx_packet().
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
- * @param addr
- * @param nbytes
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ * @addr: Address.
+ * @nbytes: Number of bytes.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes);
/**
- * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
- *
+ * qed_ll2_terminate_connection(): Stops Tx/Rx queues.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ * qed_ll2_get_stats(): Get LL2 queue's statistics.
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ * @p_stats: Pointer to the statistics.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param p_stats
- *
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats);
/**
- * @brief qed_ll2_alloc - Allocates LL2 connections set
+ * qed_ll2_alloc(): Allocates LL2 connections set.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_setup - Inits LL2 connections set
+ * qed_ll2_setup(): Inits LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_free - Releases LL2 connections set
+ * qed_ll2_free(): Releases LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn);
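Taken together, the entry points documented above imply the following client flow. This is a hedged sketch only: the handle-output wiring inside struct qed_ll2_acquire_data and the qed_ll2_tx_pkt_info contents are assumed from context rather than shown by this patch, and error handling is abbreviated.

	u8 handle;
	struct qed_ll2_acquire_data data = {};	/* cbs + input params; assumed to return the handle */
	struct qed_ll2_tx_pkt_info pkt = {};	/* num_of_bds, first fragment, ... */
	int rc;

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		return rc;
	rc = qed_ll2_establish_connection(p_hwfn, handle);
	if (rc)
		goto release;

	/* Rx path: hand a DMA-mapped buffer to the FW, producing its BD now */
	qed_ll2_post_rx_buffer(p_hwfn, handle, phys_addr, buf_len, cookie, 1);

	/* Tx path: first BD via prepare, remaining fragments one by one */
	qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
	qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle, frag_addr, frag_len);

	/* Teardown mirrors bring-up */
	qed_ll2_terminate_connection(p_hwfn, handle);
release:
	qed_ll2_release_connection(p_hwfn, handle);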
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index d10e1cd6d2ba..7673b3e07736 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = {
ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};
-static const u32 qed_mfw_ext_20g[] __initconst = {
- ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
-};
-
static const u32 qed_mfw_ext_25g[] __initconst = {
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
@@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
- QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
@@ -262,7 +257,7 @@ module_exit(qed_exit);
/* Check if the DMA controller on the machine can properly handle the DMA
* addressing required by the device.
-*/
+ */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
struct device *dev = &cdev->pdev->dev;
@@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
goto err2;
}
- DP_INFO(cdev, "qed_probe completed successfully\n");
+ DP_INFO(cdev, "%s completed successfully\n", __func__);
return cdev;
@@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
rc = qed_set_int_mode(cdev, false);
if (rc) {
- DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ DP_ERR(cdev, "%s ERR\n", __func__);
return rc;
}
@@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
/* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(wq_flag, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
@@ -1382,7 +1378,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
- strlcpy(drv_version.name, params->name,
+ strscpy(drv_version.name, params->name,
MCP_DRV_VER_STR_SIZE - 4);
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
@@ -2892,7 +2888,7 @@ static int qed_update_drv_state(struct qed_dev *cdev, bool active)
return status;
}
-static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
@@ -3079,8 +3075,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
"Scheduling slowpath task [Flag: %d]\n",
QED_SLOWPATH_MFW_TLV_REQ);
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
@@ -3159,3 +3157,8 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
return 0;
}
+
+unsigned long qed_get_epoch_time(void)
+{
+ return ktime_get_real_seconds();
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 24cd41567775..64678a256f3b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -17,6 +17,7 @@
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
+#include "qed_mfw_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -30,11 +31,11 @@
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
- qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \
_val)
#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
- qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)))
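The added parentheses around _offset matter once a caller passes an expression; a hedged illustration of the failure mode (the call itself is hypothetical):

	/* Hypothetical read at a computed offset, idx scaled to bytes */
	val = DRV_INNER_RD(p_hwfn, p_ptt, drv_mb_addr, idx << 2);

	/* Without (_offset) the macro body expanded to
	 *	...->drv_mb_addr + idx << 2  ==  (addr + idx) << 2
	 * because '+' binds tighter than '<<'; with the parentheses it
	 * reads addr + (idx << 2), the intended address.
	 */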
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
@@ -384,7 +385,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
/* Get the union data */
- if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+ if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb,
union_data);
@@ -410,7 +411,7 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb, union_data);
memset(&union_data, 0, sizeof(union_data));
- if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+ if (p_mb_params->p_data_src && p_mb_params->data_src_size)
memcpy(&union_data, p_mb_params->p_data_src,
p_mb_params->data_src_size);
qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
@@ -671,7 +672,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep)
{
struct qed_mcp_mb_params mb_params;
u8 raw_data[MCP_DRV_NVM_BUF_LEN];
@@ -684,6 +686,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
/* Use the maximal value since the actual one is part of the response */
mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+ if (b_can_sleep)
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
@@ -916,7 +920,6 @@ enum qed_load_req_force {
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
-
enum qed_load_req_force force_cmd,
u8 *p_mfw_force_cmd)
{
@@ -1526,15 +1529,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
ext_speed = 0;
if (params->ext_speed.autoneg)
- ext_speed |= ETH_EXT_SPEED_AN;
+ ext_speed |= ETH_EXT_SPEED_NONE;
val = params->ext_speed.forced_speed;
if (val & QED_EXT_SPEED_1G)
ext_speed |= ETH_EXT_SPEED_1G;
if (val & QED_EXT_SPEED_10G)
ext_speed |= ETH_EXT_SPEED_10G;
- if (val & QED_EXT_SPEED_20G)
- ext_speed |= ETH_EXT_SPEED_20G;
if (val & QED_EXT_SPEED_25G)
ext_speed |= ETH_EXT_SPEED_25G;
if (val & QED_EXT_SPEED_40G)
@@ -1560,8 +1561,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
ext_speed |= ETH_EXT_ADV_SPEED_1G;
if (val & QED_EXT_SPEED_MASK_10G)
ext_speed |= ETH_EXT_ADV_SPEED_10G;
- if (val & QED_EXT_SPEED_MASK_20G)
- ext_speed |= ETH_EXT_ADV_SPEED_20G;
if (val & QED_EXT_SPEED_MASK_25G)
ext_speed |= ETH_EXT_ADV_SPEED_25G;
if (val & QED_EXT_SPEED_MASK_40G)
@@ -2081,7 +2080,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- u32 global_offsize;
+ u32 global_offsize, public_base;
if (IS_VF(p_hwfn->cdev)) {
if (p_hwfn->vf_iov_info) {
@@ -2098,16 +2097,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
}
}
+ public_base = p_hwfn->mcp_info->public_base;
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->
- mcp_info->public_base,
+ SECTION_OFFSIZE_ADDR(public_base,
PUBLIC_GLOBAL));
*p_mfw_ver =
qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize,
0) + offsetof(struct public_global, mfw_ver));
- if (p_running_bundle_id != NULL) {
+ if (p_running_bundle_id) {
*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global,
@@ -2209,6 +2208,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
return 0;
}
+
static bool qed_is_transceiver_ready(u32 transceiver_state,
u32 transceiver_type)
{
@@ -2378,7 +2378,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"According to Legacy capabilities, L2 personality is %08x\n",
- (u32) *p_proto);
+ (u32)*p_proto);
}
static int
@@ -2423,7 +2423,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFUP,
"According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
- (u32) *p_proto, resp, param);
+ (u32)*p_proto, resp, param);
return 0;
}
@@ -2445,9 +2445,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
- case FUNC_MF_CFG_PROTOCOL_NVMETCP:
- *p_proto = QED_PCI_NVMETCP;
- break;
case FUNC_MF_CFG_PROTOCOL_FCOE:
*p_proto = QED_PCI_FCOE;
break;
@@ -2854,7 +2851,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
}
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac)
+ struct qed_ptt *p_ptt, const u8 *mac)
{
struct qed_mcp_mb_params mb_params;
u32 mfw_mac[2];
@@ -3026,7 +3023,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), false);
if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
@@ -3034,7 +3031,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
}
/* This can be a lengthy process, and it's possible scheduler
- * isn't preemptable. Sleep a bit to prevent CPU hogging.
+ * isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
(bytes_left - read_len) % 0x1000)
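The modulo comparison flags a 4 KiB boundary crossing; with hypothetical values bytes_left == 0x1010 and read_len == 0x20, bytes_left % 0x1000 == 0x10 while (bytes_left - read_len) % 0x1000 == 0xff0, so the condition holds exactly when the remaining count wraps past a 0x1000 boundary and the loop yields the CPU roughly once per 4 KiB of NVM traffic.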
@@ -3129,10 +3126,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
* to be delivered to MFW.
*/
if (param && cmd == QED_PUT_FILE_DATA) {
- buf_idx = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
- buf_size = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ buf_idx =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
} else {
buf_idx += buf_size;
buf_size = min_t(u32, (len - buf_idx),
@@ -3176,7 +3175,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_TRANSCEIVER_READ,
nvm_offset, &resp, &param, &buf_size,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), true);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send a transceiver read command to the MFW. rc = %d.\n",
@@ -3275,7 +3274,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
DRV_MSG_CODE_BIST_TEST, param,
&resp, &resp_param,
&buf_size,
- (u32 *)p_image_att);
+ (u32 *)p_image_att, false);
if (rc)
return rc;
@@ -3388,7 +3387,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
type = NVM_TYPE_DEFAULT_CFG;
break;
case QED_NVM_IMAGE_NVM_META:
- type = NVM_TYPE_META;
+ type = NVM_TYPE_NVM_META;
break;
default:
DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
@@ -3905,10 +3904,6 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
- if (QED_IS_E5(p_hwfn->cdev))
- features |=
- DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
-
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
@@ -4002,7 +3997,8 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_GET_NVM_CFG_OPTION,
- mb_param, &resp, &param, p_len, (u32 *)p_buf);
+ mb_param, &resp, &param, p_len,
+ (u32 *)p_buf, false);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 8edb450d0abf..564723800d15 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -266,97 +266,97 @@ union qed_mfw_tlv_data {
#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
/**
- * @brief - returns the link params of the hw function
+ * qed_mcp_get_link_params(): Returns the link params of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link params
+ * Returns: Pointer to link params.
*/
-struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link state of the hw function
+ * qed_mcp_get_link_state(): Return the link state of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link state
+ * Returns: Pointer to link state.
*/
-struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link capabilities of the hw function
+ * qed_mcp_get_link_capabilities(): Return the link capabilities of the
+ * hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link capabilities
+ * Returns: Pointer to link capabilities.
*/
struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
/**
- * @brief Request the MFW to set the the link according to 'link_input'.
+ * qed_mcp_set_link(): Request the MFW to set the link according
+ * to 'link_input'.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_up - raise link if `true'. Reset link if `false'.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_up: Raise link if `true'. Reset link if `false'.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_up);
/**
- * @brief Get the management firmware version value
+ * qed_mcp_get_mfw_ver(): Get the management firmware version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mfw_ver - mfw version value
- * @param p_running_bundle_id - image id in nvram; Optional.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mfw_ver: MFW version value.
+ * @p_running_bundle_id: Image id in nvram; Optional.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - operation was successful.
*/
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
- * @brief Get the MBI version value
+ * qed_mcp_get_mbi_ver(): Get the MBI version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - operation was successful.
*/
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_mbi_ver);
/**
- * @brief Get media type value of the port.
+ * qed_mcp_get_media_type(): Get media type value of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param mfw_ver - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @media_type: Media type value.
*
- * @return int -
- * 0 - Operation was successul.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *media_type);
/**
- * @brief Get transceiver data of the port.
+ * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_transceiver_state - transceiver state.
- * @param p_transceiver_type - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_transceiver_state: Transceiver state.
+ * @p_tranceiver_type: Media type value.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
u32 *p_tranceiver_type);
/**
- * @brief Get transceiver supported speed mask.
+ * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_speed_mask - Bit mask of all supported speeds.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_speed_mask: Bit mask of all supported speeds.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_speed_mask);
/**
- * @brief Get board configuration.
+ * qed_mcp_get_board_config(): Get board configuration.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_board_config - Board config.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_board_config: Board config.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed.
*/
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_board_config);
/**
- * @brief General function for sending commands to the MCP
- * mailbox. It acquire mutex lock for the entire
- * operation, from sending the request until the MCP
- * response. Waiting for MCP response will be checked up
- * to 5 seconds every 5ms.
+ * qed_mcp_cmd(): General function for sending commands to the MCP
+ * mailbox. It acquires a mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response. The MCP response is polled every 5ms
+ * for up to 5 seconds.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param cmd - command to be sent to the MCP.
- * @param param - Optional param
- * @param o_mcp_resp - The MCP response code (exclude sequence).
- * @param o_mcp_param- Optional parameter provided by the MCP
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: Command to be sent to the MCP.
+ * @param: Optional param.
+ * @o_mcp_resp: The MCP response code (excluding sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
* response
- * @return int - 0 - operation
- * was successul.
+ *
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param);
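A hedged usage sketch of the mailbox flow; the command, param value, and error handling are illustrative rather than taken from this patch (qed_mcp_drain() issues a similar request):

	u32 mcp_resp = 0, mcp_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
			 &mcp_resp, &mcp_param);
	if (rc)
		return rc;	/* the mailbox transaction itself failed */

	/* mcp_resp now holds the FW_MSG_CODE_* response, sequence excluded */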
/**
- * @brief - drains the nig, allowing completion to pass in case of pauses.
- * (Should be called only from sleepable context)
+ * qed_mcp_drain(): Drains the NIG, allowing completions to pass in
+ * case of pauses.
+ * (Should be called only from a sleepable context.)
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
*/
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Get the flash size value
+ * qed_mcp_get_flash_size(): Get the flash size value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_flash_size - flash size in bytes to be filled.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_flash_size: Flash size in bytes to be filled.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_flash_size);
/**
- * @brief Send driver version to MFW
+ * qed_mcp_send_drv_version(): Send driver version to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param version - Version value
- * @param name - Protocol driver name
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_ver: Version value.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
@@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_mcp_drv_version *p_ver);
/**
- * @brief Read the MFW process kill counter
+ * qed_get_process_kill_counter(): Read the MFW process kill counter.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return u32
+ * Return: u32.
*/
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Trigger a recovery process
+ * qed_start_recovery_process(): Trigger a recovery process.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief A recovery handler must call this function as its first step.
- * It is assumed that the handler is not run from an interrupt context.
+ * qed_recovery_prolog(): A recovery handler must call this function
+ * as its first step.
+ * It is assumed that the handler is not run from
+ * an interrupt context.
*
- * @param cdev
- * @param p_ptt
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_recovery_prolog(struct qed_dev *cdev);
/**
- * @brief Notify MFW about the change in base device properties
+ * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
+ * device properties.
*
- * @param p_hwfn
- * @param p_ptt
- * @param client - qed client type
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @client: Qed client type.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_client client);
/**
- * @brief Notify MFW about the driver state
+ * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
*
- * @param p_hwfn
- * @param p_ptt
- * @param drv_state - Driver state
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @drv_state: Driver state.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_driver_state drv_state);
/**
- * @brief Send MTU size to MFW
+ * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mtu - MTU size
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mtu: MTU size.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 mtu);
/**
- * @brief Send MAC address to MFW
+ * qed_mcp_ov_update_mac(): Send MAC address to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mac - MAC address
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mac: MAC address.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac);
+ struct qed_ptt *p_ptt, const u8 *mac);
/**
- * @brief Send WOL mode to MFW
+ * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param wol - WOL mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @wol: WOL mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_wol wol);
/**
- * @brief Set LED status
+ * qed_mcp_set_led(): Set LED status.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mode - LED mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mode: LED mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
/**
- * @brief Read from nvm
+ * qed_mcp_nvm_read(): Read from NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param p_buf - nvm read buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @p_buf: NVM read buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Write to nvm
+ * qed_mcp_nvm_write(): Write to NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param cmd - nvm command
- * @param p_buf - nvm write buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @cmd: NVM command.
+ * @p_buf: NVM write buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Check latest response
+ * qed_mcp_nvm_resp(): Check latest response.
*
- * @param cdev
- * @param p_buf - nvm write buffer
+ * @cdev: Qed dev pointer.
+ * @p_buf: NVM write buffer.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
@@ -604,13 +606,13 @@ struct qed_nvm_image_att {
};
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image to get attributes for
- * @param p_image_att - image attributes structure into which to fill data
+ * @p_hwfn: HW device data.
+ * @image_id: Image to get attributes for.
+ * @p_image_att: Image attributes structure into which to fill data.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
@@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att);
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image requested for reading
- * @param p_buffer - allocated buffer into which to fill data
- * @param buffer_len - length of the allocated buffer.
+ * @p_hwfn: HW device data.
+ * @image_id: Image requested for reading.
+ * @p_buffer: Allocated buffer into which to fill data.
+ * @buffer_len: Length of the allocated buffer.
*
- * @return 0 iff p_buffer now contains the nvram image.
+ * Return: 0 if p_buffer now contains the nvram image.
*/
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len);
/**
- * @brief Bist register test
+ * qed_mcp_bist_register_test(): Bist register test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist clock test
+ * qed_mcp_bist_clock_test(): Bist clock test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist nvm test - get number of images
+ * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param num_images - number of images if operation was
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @num_images: Number of images if operation was
* successful. 0 if not.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *num_images);
/**
- * @brief Bist nvm test - get image attributes by index
+ * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
+ * by index.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_image_att - Attributes of image
- * @param image_index - Index of image to get information for
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_image_att: Attributes of image.
+ * @image_index: Index of image to get information for.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
u32 image_index);
/**
- * @brief - Processes the TLV request from MFW i.e., get the required TLV info
- * from the qed client and send it to the MFW.
+ * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e.,
+ * gets the required TLV info
+ * from the qed client and sends it to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Send raw debug data to the MFW
+ * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_buf: Raw debug data buffer.
+ * @size: Buffer size.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_buf - raw debug data buffer
- * @param size - buffer size
+ * Return: Int.
*/
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
@@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
}
/**
- * @brief Initialize the interface with the MCP
+ * qed_mcp_cmd_init(): Initialize the interface with the MCP.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Initialize the port interface with the MCP
+ * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
* Can only be called after `num_ports_in_engines' is set
*/
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Releases resources allocated during the init process.
+ * qed_mcp_free(): Releases resources allocated during the init process.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW function.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_free(struct qed_hwfn *p_hwfn);
/**
- * @brief This function is called from the DPC context. After
- * pointing PTT to the mfw mb, check for events sent by the MCP
- * to the driver and ack them. In case a critical event
- * detected, it will be handled here, otherwise the work will be
- * queued to a sleepable work-queue.
+ * qed_mcp_handle_events(): This function is called from the DPC context.
+ * After pointing PTT to the mfw mb, check for events sent by
+ * the MCP to the driver and ack them. In case a critical event
+ * is detected, it will be handled here; otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @p_hwfn: HW function.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn - HW function
- * @param p_ptt - PTT required for register access
- * @return int - 0 - operation
- * was successul.
+ * Return: Int - 0 - Operation was successul.
*/
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -858,169 +866,177 @@ struct qed_load_req_params {
};
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
- * returns whether this PF is the first on the engine/port or function.
+ * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
+ * operation succeeds, returns whether this PF is
+ * the first on the engine/port or function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_load_req_params *p_params);
/**
- * @brief Sends a LOAD_DONE message to the MFW
+ * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_REQ message to the MFW
+ * qed_mcp_unload_req(): Sends an UNLOAD_REQ message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_DONE message to the MFW
+ * qed_mcp_unload_done(): Sends an UNLOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read the MFW mailbox into Current buffer.
+ * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Ack to mfw that driver finished FLR process for VFs
+ * qed_mcp_ack_vf_flr(): Ack to the MFW that the driver finished the FLR process for VFs.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @vfs_to_ack: Bit mask of all engine VFs for which the PF acks.
*
- * @param return int - 0 upon success.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack);
/**
- * @brief - calls during init to read shmem of all function-related info.
+ * qed_mcp_fill_shmem_func_info(): Called during init to read shmem of
+ * all function-related info.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Reset the MCP using mailbox command.
+ * qed_mcp_reset(): Reset the MCP using mailbox command.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Sends an NVM read command request to the MFW to get
- * a buffer.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
- * DRV_MSG_CODE_NVM_READ_NVRAM commands
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param o_txn_size - Buffer size output
- * @param o_buf - Pointer to the buffer returned by the MFW.
+ * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
+ * a buffer.
*
- * @param return 0 upon success.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands.
+ * @param: [0:23] - Offset [24:31] - Size.
+ * @o_mcp_resp: MCP response.
+ * @o_mcp_param: MCP response param.
+ * @o_txn_size: Buffer size output.
+ * @o_buf: Pointer to the buffer returned by the MFW.
+ * @b_can_sleep: Whether the caller context allows sleeping.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep);
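A sketch of packing the param word per the [0:23] offset / [24:31] size layout above; the literal mask and shift stand in for the DRV_MB_PARAM_NVM_* field helpers that qed_mcp_nvm_read() uses for the same purpose:

	u32 resp, resp_param, txn_size;
	u32 nvm_offset = 0x1234;		/* bits [0:23]  */
	u32 read_size = MCP_DRV_NVM_BUF_LEN;	/* bits [24:31] */
	u32 param = (nvm_offset & 0xffffff) | (read_size << 24);

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
				param, &resp, &resp_param, &txn_size,
				(u32 *)p_buf, false);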
/**
- * @brief Read from sfp
+ * qed_mcp_phy_sfp_read(): Read from SFP.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param port - transceiver port
- * @param addr - I2C address
- * @param offset - offset in sfp
- * @param len - buffer length
- * @param p_buf - buffer to read into
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @port: Transceiver port.
+ * @addr: I2C address.
+ * @offset: Offset in SFP.
+ * @len: Buffer length.
+ * @p_buf: Buffer to read into.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
/**
- * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ * qed_mcp_is_init(): Indicates whether the MFW objects [under mcp_info]
+ * are accessible.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return true iff MFW is running and mcp_info is initialized
+ * Return: true if MFW is running and mcp_info is initialized.
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
/**
- * @brief request MFW to configure MSI-X for a VF
+ * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vf_id - absolute inside engine
- * @param num_sbs - number of entries to request
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @vf_id: VF index, absolute inside the engine.
+ * @num: number of entries to request.
*
- * @return int
+ * Return: 0 upon success.
*/
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
/**
- * @brief - Halt the MCP.
+ * qed_mcp_halt(): Halt the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief - Wake up the MCP.
+ * qed_mcp_resume(): Wake up the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -1038,13 +1054,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
-/* @brief - Gets the mdump retained data from the MFW.
+/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mdump_retain
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_mdump_retain: Pointer to the mdump retained data.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
@@ -1052,15 +1068,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
struct mdump_retain_data_stc *p_mdump_retain);
/**
- * @brief - Sets the MFW's max value for the given resource
+ * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param resc_max_val
- * @param p_mcp_resp
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @res_id: Resource ID.
+ * @resc_max_val: Resource max value.
+ * @p_mcp_resp: MCP response.
*
- * @return int - 0 - operation was successful.
+ * Return: 0 upon success.
*/
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
@@ -1069,16 +1085,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
u32 resc_max_val, u32 *p_mcp_resp);
/**
- * @brief - Gets the MFW allocation info for the given resource
+ * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
+ * resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param p_mcp_resp
- * @param p_resc_num
- * @param p_resc_start
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @res_id: Resource ID.
+ * @p_mcp_resp: MCP response.
+ * @p_resc_num: Number of resources.
+ * @p_resc_start: Resource start offset.
*
- * @return int - 0 - operation was successful.
+ * Return: 0 upon success.
*/
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
@@ -1087,13 +1104,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
/**
- * @brief Send eswitch mode to MFW
+ * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param eswitch - eswitch mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @eswitch: eswitch mode.
*
- * @return int - 0 - operation was successful.
+ * Return: 0 upon success.
*/
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1113,12 +1130,12 @@ enum qed_resc_lock {
};
/**
- * @brief - Initiates PF FLR
+ * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: 0 upon success.
*/
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_resc_lock_params {
@@ -1151,13 +1168,13 @@ struct qed_resc_lock_params {
};
/**
- * @brief Acquires MFW generic resource lock
+ * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_params: Resource lock parameters.
*
- * @return int - 0 - operation was successful.
+ * Return: 0 upon success.
*/
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
@@ -1175,13 +1192,13 @@ struct qed_resc_unlock_params {
};
/**
- * @brief Releases MFW generic resource lock
+ * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_params: Resource unlock parameters.
*
- * @return int - 0 - operation was successful.
+ * Return: 0 upon success.
*/
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
@@ -1189,12 +1206,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_resc_unlock_params *p_params);
/**
- * @brief - default initialization for lock/unlock resource structs
+ * qed_mcp_resc_lock_default_init(): Default initialization for
+ * lock/unlock resource structs.
+ *
+ * @p_lock: lock params struct to be initialized; Can be NULL.
+ * @p_unlock: unlock params struct to be initialized; Can be NULL.
+ * @resource: the requested resource.
+ * @b_is_permanent: disable retries & aging when set.
*
- * @param p_lock - lock params struct to be initialized; Can be NULL
- * @param p_unlock - unlock params struct to be initialized; Can be NULL
- * @param resource - the requested resource
- * @paral b_is_permanent - disable retries & aging when set
+ * Return: Void.
*/
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
@@ -1202,94 +1222,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
resource, bool b_is_permanent);
/**
- * @brief - Return whether management firmware support smart AN
+ * qed_mcp_is_smart_an_supported(): Return whether the management firmware
+ * supports smart AN.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return bool - true if feature is supported.
+ * Return: True if the feature is supported, false otherwise.
*/
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
/**
- * @brief Learn of supported MFW features; To be done during early init
+ * qed_mcp_get_capabilities(): Learn of supported MFW features;
+ * To be done during early init.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: 0 upon success.
*/
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Inform MFW of set of features supported by driver. Should be done
- * inside the content of the LOAD_REQ.
+ * qed_mcp_set_capabilities(): Inform MFW of set of features supported
+ * by driver. Should be done inside the content
+ * of the LOAD_REQ.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read ufp config from the shared memory.
+ * qed_mcp_read_ufp_config(): Read ufp config from the shared memory.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Void.
*/
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Populate the nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: 0 upon success.
*/
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
/**
- * @brief Delete nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
/**
- * @brief Get the engine affinity configuration.
+ * qed_mcp_get_engine_config(): Get the engine affinity configuration.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: 0 upon success.
*/
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get the PPFID bitmap.
+ * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get NVM config attribute value.
+ * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param p_len
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buffer for the attribute value.
+ * @p_len: Buffer length.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 *p_len);
/**
- * @brief Set NVM config attribute value.
+ * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param len
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buffer holding the attribute value to set.
+ * @len: Buffer length.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
new file mode 100644
index 000000000000..8a0e3c5d4bda
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -0,0 +1,2474 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_MFW_HSI_H
+#define _QED_MFW_HSI_H
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Help to identify that the trace is valid */
+ u32 size; /* the size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32 bits as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually very close after the current producer).
+ */
+};
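+
+/* Illustrative consumer loop (a sketch, not the MFW's actual reader):
+ * entries are consumed from trace_oldest up to trace_prod, wrapping at
+ * 'size'; read_entry() is a hypothetical helper returning an entry length:
+ *
+ *	u32 off = trace->trace_oldest;
+ *
+ *	while (off != trace->trace_prod)
+ *		off = (off + read_entry(trace, off)) % trace->size;
+ */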
+
+#define VF_MAX_STATIC 192
+#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32)
+#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+
+#define EXT_VF_MAX_STATIC 240
+#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1)
+#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+#define ADDED_VF_BITMAP_SIZE 2
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+#define SECTION_OFFSET(_offsize) (((((_offsize) & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET((_offsize)) + \
+ (QED_SECTION_SIZE((_offsize)) * (idx)))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ ((_pub_base) + offsetof(struct mcp_public_data, sections[_section]))
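+
+/* Worked example (illustrative value): for _offsize == 0x00240010 the offset
+ * field is 0x10 dwords and the size field is 0x24 dwords, so SECTION_OFFSET()
+ * yields 0x40 bytes, QED_SECTION_SIZE() yields 0x90 bytes, and
+ * SECTION_ADDR(0x00240010, 2) == MCP_REG_SCRATCH + 0x40 + 2 * 0x90.
+ */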
+
+/* PHY configuration */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0x0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE 0x0
+#define ETH_LOOPBACK_INT_PHY 0x1
+#define ETH_LOOPBACK_EXT_PHY 0x2
+#define ETH_LOOPBACK_EXT 0x3
+#define ETH_LOOPBACK_MAC 0x4
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
+#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
+
+ u32 eee_cfg;
+#define EEE_CFG_EEE_ENABLED BIT(0)
+#define EEE_CFG_TX_LPI BIT(1)
+#define EEE_CFG_ADV_SPEED_1G BIT(2)
+#define EEE_CFG_ADV_SPEED_10G BIT(3)
+#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
+#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
+
+ u32 link_modes;
+
+ u32 fec_mode;
+#define FEC_FORCE_MODE_MASK 0x000000ff
+#define FEC_FORCE_MODE_OFFSET 0
+#define FEC_FORCE_MODE_NONE 0x00
+#define FEC_FORCE_MODE_FIRECODE 0x01
+#define FEC_FORCE_MODE_RS 0x02
+#define FEC_FORCE_MODE_AUTO 0x07
+#define FEC_EXTENDED_MODE_MASK 0xffffff00
+#define FEC_EXTENDED_MODE_OFFSET 8
+#define ETH_EXT_FEC_NONE 0x00000000
+#define ETH_EXT_FEC_10G_NONE 0x00000100
+#define ETH_EXT_FEC_10G_BASE_R 0x00000200
+#define ETH_EXT_FEC_25G_NONE 0x00000400
+#define ETH_EXT_FEC_25G_BASE_R 0x00000800
+#define ETH_EXT_FEC_25G_RS528 0x00001000
+#define ETH_EXT_FEC_40G_NONE 0x00002000
+#define ETH_EXT_FEC_40G_BASE_R 0x00004000
+#define ETH_EXT_FEC_50G_NONE 0x00008000
+#define ETH_EXT_FEC_50G_BASE_R 0x00010000
+#define ETH_EXT_FEC_50G_RS528 0x00020000
+#define ETH_EXT_FEC_50G_RS544 0x00040000
+#define ETH_EXT_FEC_100G_NONE 0x00080000
+#define ETH_EXT_FEC_100G_BASE_R 0x00100000
+#define ETH_EXT_FEC_100G_RS528 0x00200000
+#define ETH_EXT_FEC_100G_RS544 0x00400000
+
+ u32 extended_speed;
+#define ETH_EXT_SPEED_MASK 0x0000ffff
+#define ETH_EXT_SPEED_OFFSET 0
+#define ETH_EXT_SPEED_NONE 0x00000001
+#define ETH_EXT_SPEED_1G 0x00000002
+#define ETH_EXT_SPEED_10G 0x00000004
+#define ETH_EXT_SPEED_25G 0x00000008
+#define ETH_EXT_SPEED_40G 0x00000010
+#define ETH_EXT_SPEED_50G_BASE_R 0x00000020
+#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040
+#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080
+#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100
+#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200
+#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000
+#define ETH_EXT_ADV_SPEED_OFFSET 16
+#define ETH_EXT_ADV_SPEED_1G 0x00010000
+#define ETH_EXT_ADV_SPEED_10G 0x00020000
+#define ETH_EXT_ADV_SPEED_25G 0x00040000
+#define ETH_EXT_ADV_SPEED_40G 0x00080000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000
+#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000
+};
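+
+/* Illustrative use (a sketch only): a driver requesting autonegotiated speed
+ * and pause with forced RS FEC might fill the structure as:
+ *
+ *	struct eth_phy_cfg cfg = {0};
+ *
+ *	cfg.speed = ETH_SPEED_AUTONEG;
+ *	cfg.pause = ETH_PAUSE_AUTONEG;
+ *	cfg.fec_mode = FEC_FORCE_MODE_RS << FEC_FORCE_MODE_OFFSET;
+ */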
+
+struct port_mf_cfg {
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
+ u64 rfcs;
+ u64 rxcf;
+ u64 rxpf;
+ u64 rxpp;
+ u64 raln;
+ u64 rfcr;
+ u64 rovr;
+ u64 rjbr;
+ u64 rund;
+ u64 rfrg;
+ u64 t64;
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
+ u64 txpf;
+ u64 txpp;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
+ u64 rbyte;
+ u64 rxuca;
+ u64 rxmca;
+ u64 rxbca;
+ u64 rxpok;
+ u64 tbyte;
+ u64 txuca;
+ u64 txmca;
+ u64 txbca;
+ u64 txcf;
+};
+
+struct pkt_type_cnt {
+ u64 tc_tx_pkt_cnt[8];
+ u64 tc_tx_oct_cnt[8];
+ u64 priority_rx_pkt_cnt[8];
+ u64 priority_rx_oct_cnt[8];
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct eth_stats eth;
+};
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
+
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define MAX_TLV_BUFFER 128
+
+enum _lldp_agent {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status;
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
+ u32 pri_tc_tbl[1];
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+struct dcbx_features {
+ struct dcbx_ets_feature ets;
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u32 flags;
+#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
+#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
+#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
+#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1
+#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
+#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+struct lldp_received_tlvs_s {
+ u32 prefix_seq_num;
+ u32 length;
+ u32 tlvs_buffer[MAX_TLV_BUFFER];
+ u32 suffix_seq_num;
+};
+
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct generic_idc_msg_s {
+ u32 source_pf;
+ struct mcp_val64 msg;
+};
+
+struct pcie_stats_stc {
+ u32 sr_cnt_wr_byte_msb;
+ u32 sr_cnt_wr_byte_lsb;
+ u32 sr_cnt_wr_cnt;
+ u32 sr_cnt_rd_byte_msb;
+ u32 sr_cnt_rd_byte_lsb;
+ u32 sr_cnt_rd_cnt;
+};
+
+enum _attribute_commands_e {
+ ATTRIBUTE_CMD_READ = 0,
+ ATTRIBUTE_CMD_WRITE,
+ ATTRIBUTE_CMD_READ_CLEAR,
+ ATTRIBUTE_CMD_CLEAR,
+ ATTRIBUTE_NUM_OF_COMMANDS
+};
+
+struct public_global {
+ u32 max_path;
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+ u32 ext_phy_upgrade_fw;
+ u8 runtime_port_swap_map[MODE_4P];
+ u32 data_ptr;
+ u32 data_size;
+ u32 bmb_error_status_cnt;
+ u32 bmb_jumbo_frame_cnt;
+ u32 sent_to_bmc_cnt;
+ u32 handled_by_mfw;
+ u32 sent_to_nw_cnt;
+ u32 to_bmc_kb_per_second;
+ u32 bcast_dropped_to_bmc_cnt;
+ u32 mcast_dropped_to_bmc_cnt;
+ u32 ucast_dropped_to_bmc_cnt;
+ u32 ncsi_response_failure_cnt;
+ u32 device_attr;
+ u32 vpd_warning;
+};
+
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack;
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit))
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct dci_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct dci_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct dci_fc_npiv_tbl {
+ struct dci_fc_npiv_cfg fc_npiv_cfg;
+ struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct pause_flood_monitor {
+ u8 period_cnt;
+ u8 any_brb_prs_packet_hist;
+ u8 any_brb_block_is_full_hist;
+ u8 flags;
+ u32 num_of_state_changes;
+};
+
+struct public_port {
+ u32 validity_map;
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30)
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+ u32 link_change_count;
+
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
+
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+
+ u32 eee_status;
+#define EEE_ACTIVE_BIT BIT(0)
+#define EEE_LD_ADV_STATUS_MASK 0x000000f0
+#define EEE_LD_ADV_STATUS_OFFSET 4
+#define EEE_1G_ADV BIT(1)
+#define EEE_10G_ADV BIT(2)
+#define EEE_LP_ADV_STATUS_MASK 0x00000f00
+#define EEE_LP_ADV_STATUS_OFFSET 8
+#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
+#define EEE_SUPPORTED_SPEED_OFFSET 12
+#define EEE_1G_SUPPORTED BIT(1)
+#define EEE_10G_SUPPORTED BIT(2)
+
+ u32 eee_remote;
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_OFFSET 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 module_info;
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
+
+ struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
+ u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
+ u32 phy_module_temperature;
+ u32 nig_reg_stat_rx_bmb_packet;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask_2;
+ struct pause_flood_monitor pause_flood_monitor;
+ u32 nig_drain_cnt;
+ struct pkt_type_cnt pkt_tc_priority_cnt;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 mtu_size;
+
+ u32 c2s_pcp_map_lower;
+ u32 c2s_pcp_map_upper;
+ u32 c2s_pcp_map_default;
+
+ struct generic_idc_msg_s generic_idc_msg;
+
+ u32 num_of_msix;
+
+ u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
+
+ u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation;
+
+ u32 preserve_data;
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define LOAD_REQ_HSI_VERSION 2
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
+
+ struct drv_version_stc drv_ver;
+};
+
+struct mcp_mac {
+ u32 mac_upper;
+ u32 mac_lower;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct iscsi_stats_stc {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct rdma_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+struct fcoe_cap_stc {
+ u32 max_ios;
+ u32 max_log;
+ u32 max_exch;
+ u32 max_npiv;
+ u32 max_tgt;
+ u32 max_outstnd;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
+ RESOURCE_QCN_E = 18,
+ RESOURCE_LLH_FILTER_E = 19,
+ RESOURCE_VF_MAC_ADDR = 20,
+ RESOURCE_LL2_CQS_E = 21,
+ RESOURCE_VF_CNQS = 22,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* number of allocated resources */
+ u32 offset; /* Offset of the 1st resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT BIT(0)
+};
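+
+/* Illustrative usage (sketch only): the driver fills just res_id before
+ * issuing DRV_MSG_GET_RESOURCE_ALLOC_MSG; size, offset, vf_size, vf_offset
+ * and flags come back filled by the MFW:
+ *
+ *	struct resource_info info = { .res_id = RESOURCE_NUM_VPORT_E };
+ */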
+
+struct mcp_wwn {
+ u32 wwn_upper;
+ u32 wwn_lower;
+};
+
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
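+
+/* Illustrative misc0 composition (a sketch; an OS driver with the default
+ * lock timeout would set):
+ *
+ *	misc0 = (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) |
+ *		(LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT);
+ */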
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+struct attribute_cmd_write_stc {
+ u32 val;
+ u32 mask;
+ u32 offset;
+};
+
+struct lldp_stats_stc {
+ u32 tx_frames_total;
+ u32 rx_frames_total;
+ u32 rx_frames_discarded;
+ u32 rx_age_outs;
+};
+
+struct get_att_ctrl_stc {
+ u32 disabled_attns;
+ u32 controllable_attns;
+};
+
+struct trace_filter_stc {
+ u32 level;
+ u32 modules;
+};
+
+union drv_union_data {
+ struct mcp_mac wol_mac;
+
+ struct eth_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64;
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS];
+
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ struct fcoe_stats_stc fcoe_stats;
+ struct iscsi_stats_stc iscsi_stats;
+ struct rdma_stats_stc rdma_stats;
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct resource_info resource;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
+ struct mcp_mac lldp_mac;
+ struct mcp_wwn fcoe_fabric_name;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ struct mdump_retain_data_stc mdump_retain;
+ struct attribute_cmd_write_stc attribute_cmd_write;
+ struct lldp_stats_stc lldp_stats;
+ struct pcie_stats_stc pcie_stats;
+
+ struct get_att_ctrl_stc get_att_ctrl;
+ struct fcoe_cap_stc fcoe_cap;
+ struct trace_filter_stc trace_filter;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define DRV_MSG_SEQ_NUMBER_OFFSET 0
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_OFFSET 16
+
+ u32 drv_mb_param;
+
+ u32 fw_mb_header;
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_SEQ_NUMBER_OFFSET 0
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_OFFSET 16
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
+
+#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET)
+enum drv_msg_code_enum {
+ DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001),
+ DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002),
+ DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003),
+ DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005),
+ DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006),
+ DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009),
+ DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f),
+ DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010),
+ DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011),
+ DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012),
+ DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013),
+ DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016),
+ DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a),
+ DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e),
+ DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020),
+ DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023),
+ DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025),
+ DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b),
+ DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e),
+ DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f),
+ DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030),
+ DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031),
+ DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037),
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e),
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f),
+ DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201),
+ DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000),
+ DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100),
+ DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200),
+ DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300),
+ DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000),
+ DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100),
+ DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200),
+ DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300),
+ DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500),
+ DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600),
+ DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700),
+ DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800),
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900),
+ DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000),
+ DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100),
+ DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200),
+ DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300),
+ DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400),
+ DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500),
+ DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800),
+ DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900),
+ DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00),
+ DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100),
+ DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300),
+ DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000),
+ DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001),
+ DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002),
+ DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004),
+};
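+
+/* Illustrative drv_mb_header composition (sketch only; 'seq' is a
+ * hypothetical sequence counter): the enum values above already occupy the
+ * code field, so only the sequence number needs to be OR'ed in:
+ *
+ *	u32 hdr = DRV_MSG_CODE_MCP_RESET | (seq & DRV_MSG_SEQ_NUMBER_MASK);
+ */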
+
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+
+/* DRV_MSG_CODE_RETAIN_VMAC parameters */
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf
+
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4
+
+#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce
+
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
+#define BW_MAX_MASK 0x000000ff
+#define BW_MAX_OFFSET 0
+#define BW_MIN_MASK 0x0000ff00
+#define BW_MIN_OFFSET 8
+
+#define DRV_MSG_FAN_FAILURE_TYPE BIT(0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1)
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_OPCODE_REQ 1
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
+
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+
+#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b
+#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c
+#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d
+
+/* DRV_MSG_CODE_MDUMP_CMD options */
+#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100
+
+/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+
+/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */
+#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0
+#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF
+#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8
+#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300
+#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16
+#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000
+
+/* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+/* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+/* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+/* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
+ DRV_MB_PARAM_WOL_DISABLED | \
+ DRV_MB_PARAM_WOL_ENABLED)
+#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
+#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
+#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
+
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
+
+ /* Resource Allocation params - Driver version support */
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
+
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
+
+/* DRV_MSG_CODE_GET_PERM_MAC parameters */
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00
+
+#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET)
+enum fw_msg_code_enum {
+ FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000),
+ FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001),
+ FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040),
+ FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011),
+ FW_MSG_CODE_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017),
+ FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002),
+ FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003),
+ FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080),
+ FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087),
+ FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010),
+ FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011),
+ FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031),
+ FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110),
+ FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011),
+ FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012),
+ FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013),
+ FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110),
+ FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400),
+ FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500),
+ FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00),
+ FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001),
+ FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a),
+ FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b),
+};
+
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+/* Get PF RDMA protocol command response */
+#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
+#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
+#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
+#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+
+/* Get MFW feature support response */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2)
+#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4)
+#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17)
+#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19)
+
+#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
+
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
+#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
+
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
+
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
+ MFW_DRV_MSG_GENERIC_IDC,
+ MFW_DRV_MSG_XCVR_TX_FAULT,
+ MFW_DRV_MSG_XCVR_RX_LOS,
+ MFW_DRV_MSG_GET_FCOE_CAP,
+ MFW_DRV_MSG_GEN_LINK_DUMP,
+ MFW_DRV_MSG_GEN_IDLE_CHK,
+ MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
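For illustration only (not part of the patch): the macros above give each message one 8-bit slot, four per dword, so the msg[] and ack[] arrays can be compared slot by slot. A sketch of testing whether a message is still outstanding; the function name is hypothetical.

	/* Hypothetical sketch only */
	static bool mfw_msg_pending(const struct public_mfw_mb *mb, u8 msg_id)
	{
		u32 seen = mb->msg[MFW_DRV_MSG_DWORD(msg_id)];
		u32 ackd = mb->ack[MFW_DRV_MSG_DWORD(msg_id)];

		/* Outstanding while the message byte differs from its ack */
		return ((seen ^ ackd) & MFW_DRV_MSG_MASK(msg_id)) != 0;
	}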
+
+enum public_sections {
+ PUBLIC_DRV_MB,
+ PUBLIC_MFW_MB,
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+struct drv_init_hw_stc {
+ u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+ u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+struct mcp_public_data {
+ u32 num_sections;
+ u32 sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+#define I2C_TRANSCEIVER_ADDR 0xa0
+#define MAX_I2C_TRANSACTION_SIZE 16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
+
+/* OCBB definitions */
+enum tlvs {
+ /* Category 1: Device Properties */
+ DRV_TLV_CLP_STR,
+ DRV_TLV_CLP_STR_CTD,
+ /* Category 6: Device Configuration */
+ DRV_TLV_SCSI_TO,
+ DRV_TLV_R_T_TOV,
+ DRV_TLV_R_A_TOV,
+ DRV_TLV_E_D_TOV,
+ DRV_TLV_CR_TOV,
+ DRV_TLV_BOOT_TYPE,
+ /* Category 8: Port Configuration */
+ DRV_TLV_NPIV_ENABLED,
+ /* Category 10: Function Configuration */
+ DRV_TLV_FEATURE_FLAGS,
+ DRV_TLV_LOCAL_ADMIN_ADDR,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_1,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_2,
+ DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
+ DRV_TLV_LSO_MIN_SEGMENT_COUNT,
+ DRV_TLV_PROMISCUOUS_MODE,
+ DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
+ DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
+ DRV_TLV_OS_DRIVER_STATES,
+ DRV_TLV_PXE_BOOT_PROGRESS,
+ /* Category 12: FC/FCoE Configuration */
+ DRV_TLV_NPIV_STATE,
+ DRV_TLV_NUM_OF_NPIV_IDS,
+ DRV_TLV_SWITCH_NAME,
+ DRV_TLV_SWITCH_PORT_NUM,
+ DRV_TLV_SWITCH_PORT_ID,
+ DRV_TLV_VENDOR_NAME,
+ DRV_TLV_SWITCH_MODEL,
+ DRV_TLV_SWITCH_FW_VER,
+ DRV_TLV_QOS_PRIORITY_PER_802_1P,
+ DRV_TLV_PORT_ALIAS,
+ DRV_TLV_PORT_STATE,
+ DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_LINK_FAILURE_COUNT,
+ DRV_TLV_FCOE_BOOT_PROGRESS,
+ /* Category 13: iSCSI Configuration */
+ DRV_TLV_TARGET_LLMNR_ENABLED,
+ DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
+ DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
+ DRV_TLV_AUTHENTICATION_METHOD,
+ DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
+ DRV_TLV_MAX_FRAME_SIZE,
+ DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_ISCSI_BOOT_PROGRESS,
+ /* Category 20: Device Data */
+ DRV_TLV_PCIE_BUS_RX_UTILIZATION,
+ DRV_TLV_PCIE_BUS_TX_UTILIZATION,
+ DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
+ DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
+ DRV_TLV_NCSI_RX_BYTES_RECEIVED,
+ DRV_TLV_NCSI_TX_BYTES_SENT,
+ /* Category 22: Base Port Data */
+ DRV_TLV_RX_DISCARDS,
+ DRV_TLV_RX_ERRORS,
+ DRV_TLV_TX_ERRORS,
+ DRV_TLV_TX_DISCARDS,
+ DRV_TLV_RX_FRAMES_RECEIVED,
+ DRV_TLV_TX_FRAMES_SENT,
+ /* Category 23: FC/FCoE Port Data */
+ DRV_TLV_RX_BROADCAST_PACKETS,
+ DRV_TLV_TX_BROADCAST_PACKETS,
+ /* Category 28: Base Function Data */
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
+ DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_PF_RX_FRAMES_RECEIVED,
+ DRV_TLV_RX_BYTES_RECEIVED,
+ DRV_TLV_PF_TX_FRAMES_SENT,
+ DRV_TLV_TX_BYTES_SENT,
+ DRV_TLV_IOV_OFFLOAD,
+ DRV_TLV_PCI_ERRORS_CAP_ID,
+ DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
+ DRV_TLV_UNCORRECTABLE_ERROR_MASK,
+ DRV_TLV_CORRECTABLE_ERROR_STATUS,
+ DRV_TLV_CORRECTABLE_ERROR_MASK,
+ DRV_TLV_PCI_ERRORS_AECC_REGISTER,
+ DRV_TLV_TX_QUEUES_EMPTY,
+ DRV_TLV_RX_QUEUES_EMPTY,
+ DRV_TLV_TX_QUEUES_FULL,
+ DRV_TLV_RX_QUEUES_FULL,
+ /* Category 29: FC/FCoE Function Data */
+ DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
+ DRV_TLV_FCOE_RX_BYTES_RECEIVED,
+ DRV_TLV_FCOE_TX_FRAMES_SENT,
+ DRV_TLV_FCOE_TX_BYTES_SENT,
+ DRV_TLV_CRC_ERROR_COUNT,
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_1_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_2_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_3_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_4_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_5_TIMESTAMP,
+ DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
+ DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
+ DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
+ DRV_TLV_DISPARITY_ERROR_COUNT,
+ DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_RJT,
+ DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
+ DRV_TLV_FDISCS_SENT_COUNT,
+ DRV_TLV_FDISC_ACCS_RECEIVED,
+ DRV_TLV_FDISC_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_SENT_COUNT,
+ DRV_TLV_PLOGI_ACCS_RECEIVED,
+ DRV_TLV_PLOGI_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_1_TIMESTAMP,
+ DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_2_TIMESTAMP,
+ DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_3_TIMESTAMP,
+ DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_4_TIMESTAMP,
+ DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_5_TIMESTAMP,
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
+ DRV_TLV_LOGOS_ISSUED,
+ DRV_TLV_LOGO_ACCS_RECEIVED,
+ DRV_TLV_LOGO_RJTS_RECEIVED,
+ DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_1_TIMESTAMP,
+ DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_2_TIMESTAMP,
+ DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_3_TIMESTAMP,
+ DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_4_TIMESTAMP,
+ DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_5_TIMESTAMP,
+ DRV_TLV_LOGOS_RECEIVED,
+ DRV_TLV_ACCS_ISSUED,
+ DRV_TLV_PRLIS_ISSUED,
+ DRV_TLV_ACCS_RECEIVED,
+ DRV_TLV_ABTS_SENT_COUNT,
+ DRV_TLV_ABTS_ACCS_RECEIVED,
+ DRV_TLV_ABTS_RJTS_RECEIVED,
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_1_TIMESTAMP,
+ DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_2_TIMESTAMP,
+ DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_3_TIMESTAMP,
+ DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_4_TIMESTAMP,
+ DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_5_TIMESTAMP,
+ DRV_TLV_RSCNS_RECEIVED,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
+ DRV_TLV_LUN_RESETS_ISSUED,
+ DRV_TLV_ABORT_TASK_SETS_ISSUED,
+ DRV_TLV_TPRLOS_SENT,
+ DRV_TLV_NOS_SENT_COUNT,
+ DRV_TLV_NOS_RECEIVED_COUNT,
+ DRV_TLV_OLS_COUNT,
+ DRV_TLV_LR_COUNT,
+ DRV_TLV_LRR_COUNT,
+ DRV_TLV_LIP_SENT_COUNT,
+ DRV_TLV_LIP_RECEIVED_COUNT,
+ DRV_TLV_EOFA_COUNT,
+ DRV_TLV_EOFNI_COUNT,
+ DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
+ DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_BUSY_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
+ DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
+ /* Category 30: iSCSI Function Data */
+ DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
+ DRV_TLV_ISCSI_PDU_TX_BYTES_SENT,
+ DRV_TLV_RDMA_DRV_VERSION
+};
+
+#define I2C_DEV_ADDR_A2 0xa2
+#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60
+#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2
+#define SFP_EEPROM_A2_VCC_ADDR 0x62
+#define SFP_EEPROM_A2_VCC_SIZE 2
+#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64
+#define SFP_EEPROM_A2_TX_BIAS_SIZE 2
+#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66
+#define SFP_EEPROM_A2_TX_POWER_SIZE 2
+#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68
+#define SFP_EEPROM_A2_RX_POWER_SIZE 2
+
+#define I2C_DEV_ADDR_A0 0xa0
+#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16
+#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2
+#define QSFP_EEPROM_A0_VCC_ADDR 0x1a
+#define QSFP_EEPROM_A0_VCC_SIZE 2
+#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a
+#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2
+#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32
+#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2
+#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22
+#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+
+ u32 engineering_change[3];
+ u32 manufacturing_id;
+ u32 serial_number[4];
+ u32 pcie_cfg;
+ u32 mgmt_traffic;
+
+ u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
+
+ u32 e_lane_cfg1;
+ u32 e_lane_cfg2;
+ u32 f_lane_cfg1;
+ u32 f_lane_cfg2;
+ u32 mps10_preemphasis;
+ u32 mps10_driver_current;
+ u32 mps25_preemphasis;
+ u32 mps25_driver_current;
+ u32 pci_id;
+ u32 pci_subsys_id;
+ u32 bar;
+ u32 mps10_txfir_main;
+ u32 mps10_txfir_post;
+ u32 mps25_txfir_main;
+ u32 mps25_txfir_post;
+ u32 manufacture_ver;
+ u32 manufacture_time;
+ u32 led_global_settings;
+ u32 generic_cont1;
+
+ u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date;
+ u32 misc_sig;
+
+ u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10
+
+ u32 power_dissipated;
+ u32 power_consumed;
+ u32 efi_version;
+ u32 multi_network_modes_capability;
+ u32 nvm_cfg_version;
+ u32 nvm_cfg_new_option_seq;
+ u32 nvm_cfg_removed_option_seq;
+ u32 nvm_cfg_updated_value_seq;
+ u32 extended_serial_number[8];
+ u32 option_kit_pn[8];
+ u32 spare_pn[8];
+ u32 mps25_active_txfir_pre;
+ u32 mps25_active_txfir_main;
+ u32 mps25_active_txfir_post;
+ u32 features;
+ u32 tx_rx_eq_25g_hlpc;
+ u32 tx_rx_eq_25g_llpc;
+ u32 tx_rx_eq_25g_ac;
+ u32 tx_rx_eq_10g_pc;
+ u32 tx_rx_eq_10g_ac;
+ u32 tx_rx_eq_1g;
+ u32 tx_rx_eq_25g_bt;
+ u32 tx_rx_eq_10g_bt;
+ u32 generic_cont4;
+ u32 preboot_debug_mode_std;
+ u32 preboot_debug_mode_ext;
+ u32 ext_phy_cfg1;
+ u32 clocks;
+ u32 pre2_generic_cont_1;
+ u32 pre2_generic_cont_2;
+ u32 pre2_generic_cont_3;
+ u32 tx_rx_eq_50g_hlpc;
+ u32 tx_rx_eq_50g_mlpc;
+ u32 tx_rx_eq_50g_llpc;
+ u32 tx_rx_eq_50g_ac;
+ u32 trace_modules;
+ u32 pcie_class_code_fcoe;
+ u32 pcie_class_code_iscsi;
+ u32 no_provisioned_mac;
+ u32 lowest_mbi_version;
+ u32 generic_cont5;
+ u32 pre2_generic_cont_4;
+ u32 reserved[40];
+};
+
+struct nvm_cfg1_path {
+ u32 reserved[1];
+};
+
+struct nvm_cfg1_port {
+ u32 rel_to_opt123;
+ u32 rel_to_opt124;
+
+ u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+
+ u32 pcie_cfg;
+ u32 features;
+
+ u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+
+ u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+
+ u32 phy_cfg;
+ u32 mgmt_traffic;
+
+ u32 ext_phy;
+ /* EEE power saving mode */
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
+
+ u32 mba_cfg1;
+ u32 mba_cfg2;
+ u32 vf_cfg;
+ struct nvm_cfg_mac_address lldp_mac_address;
+ u32 led_port_settings;
+ u32 transceiver_00;
+ u32 device_ids;
+
+ u32 board_cfg;
+#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+
+ u32 mnm_10g_cap;
+ u32 mnm_10g_ctrl;
+ u32 mnm_10g_misc;
+ u32 mnm_25g_cap;
+ u32 mnm_25g_ctrl;
+ u32 mnm_25g_misc;
+ u32 mnm_40g_cap;
+ u32 mnm_40g_ctrl;
+ u32 mnm_40g_misc;
+ u32 mnm_50g_cap;
+ u32 mnm_50g_ctrl;
+ u32 mnm_50g_misc;
+ u32 mnm_100g_cap;
+ u32 mnm_100g_ctrl;
+ u32 mnm_100g_misc;
+
+ u32 temperature;
+ u32 ext_phy_cfg1;
+
+ u32 extended_speed;
+#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
+#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
+
+ u32 extended_fec_mode;
+ u32 port_generic_cont_01;
+ u32 port_generic_cont_02;
+ u32 phy_temp_monitor;
+ u32 reserved[109];
+};
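For illustration only (not part of the patch): every nvm_cfg field follows the same MASK/OFFSET idiom, so decoding is uniform. A sketch of reading the forced link speed; the port pointer is an assumption.

	/* Hypothetical sketch only */
	u32 speed = (port->link_settings &
		     NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		    NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET;

	if (speed == NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG)
		; /* leave the speed to autonegotiation */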
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address;
+ u32 rsrv1;
+ u32 rsrv2;
+ u32 device_id;
+ u32 cmn_cfg;
+ u32 pci_cfg;
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+ u32 preboot_generic_cfg;
+ u32 features;
+ u32 mf_mode_feature;
+ u32 reserved[6];
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob;
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
+};
+
+struct board_info {
+ u16 vendor_id;
+ u16 eth_did_suffix;
+ u16 sub_vendor_id;
+ u16 sub_device_id;
+ char *board_name;
+ char *friendly_name;
+};
+
+struct trace_module_info {
+ char *module_name;
+};
+
+#define NUM_TRACE_MODULES 25
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET)))
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \
+ __builtin_offsetof(struct static_init, f))
+
+/* This section is located at a fixed place at the beginning of the
+ * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
+ * All the rest of the data has a floating location which differs from version
+ * to version, and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
+ * with it from nvram in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ u32 tim_hash[8];
+#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash)))
+ u32 tpu_hash[8];
+#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash)))
+ u32 secure_pcie_fw_ver;
+#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver))))
+ u32 secure_running_mfw;
+#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw))))
+ struct mcp_trace trace;
+};
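For illustration only (not part of the patch): because struct static_init sits at a fixed scratchpad address, SECTION() can resolve a section descriptor with plain pointer arithmetic. offsize_t and STATIC_INIT_BASE are assumed to come from elsewhere in this header.

	/* Hypothetical sketch only */
	offsize_t pub = SECTION(SPAD_SECTION_PUBLIC);	/* public-data descriptor */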
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define _KB(x) ((x) * 1024)
+#define _MB(x) (_KB(x) * 1024)
+#define NVM_CRC_SIZE (sizeof(u32))
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len;
+ u32 code_start_addr;
+ u32 crc;
+};
+
+struct nvm_code_entry {
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+ u32 sram_start_addr;
+ u32 sram_run_addr;
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_NVM_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_BDN = 0x22,
+ NVM_TYPE_8485X_PHY_FW = 0x23,
+ NVM_TYPE_PUB_KEY = 0x24,
+ NVM_TYPE_RECOVERY = 0x25,
+ NVM_TYPE_PLDM = 0x26,
+ NVM_TYPE_UPK1 = 0x27,
+ NVM_TYPE_UPK2 = 0x28,
+ NVM_TYPE_MASTER_KC = 0x29,
+ NVM_TYPE_BACKUP_KC = 0x2a,
+ NVM_TYPE_HW_DUMP = 0x2b,
+ NVM_TYPE_HW_DUMP_OUT = 0x2c,
+ NVM_TYPE_BIN_NVM_META = 0x30,
+ NVM_TYPE_ROM_TEST = 0xf0,
+ NVM_TYPE_88X33X0_PHY_FW = 0x31,
+ NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
+ NVM_TYPE_IDLE_CHK = 0x33,
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 100
+
+struct nvm_dir_meta {
+ u32 dir_id;
+ u32 nvm_dir_addr;
+ u32 num_images;
+ u32 next_mfw_to_run;
+};
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\
+ ({ \
+ _seq = (((_seq + 2) & \
+ NVM_DIR_SEQ_MASK) | \
+ (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\
+ })
+
+#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \
+ NVM_DIR_SEQ_MASK)
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ ((_num_images) - 1) *\
+ sizeof(struct nvm_code_entry) +\
+ NVM_CRC_SIZE)
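For illustration only (not part of the patch): in nvm_dir, bit 0 of seq names the MFW bundle to boot next, and the remaining bits count directory updates; NVM_DIR_UPDATE_SEQ advances the count by one step of 2 and flips the bundle bit when swap_mfw is odd. A usage sketch, with p_dir assumed valid.

	/* Hypothetical sketch only */
	s32 seq = p_dir->seq;

	if (IS_DIR_SEQ_VALID(seq))
		NVM_DIR_UPDATE_SEQ(seq, 1);	/* bump count, swap next MFW */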
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ u8 vpd_data[1];
+};
+
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE)
+#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200))
+
+#define FPGA_MIM_MAX_SIZE (0x40000)
+
+#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) \
+ - NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE)
+#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\
+ + (((idx) == NVM_TYPE_MIM2) ? \
+ GET_MIM_MAX_SIZE(is_asic, is_e4)\
+ : 0))
+#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \
+ GET_MIM_MAX_SIZE(is_asic,\
+ is_e4) * 2)
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+struct nvm_image {
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+};
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+ struct hw_set_info hw_sets[1];
+};
+
+#define MAX_SUPPORTED_NVM_OPTIONS 1000
+
+#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff
+#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0
+#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000
+#define NVM_META_BIN_OPTION_LEN_OFFSET 16
+#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000
+#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24
+#define NVM_META_BIN_OPTION_ENTITY_GLOB 0
+#define NVM_META_BIN_OPTION_ENTITY_PORT 1
+#define NVM_META_BIN_OPTION_ENTITY_FUNC 2
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2
+
+struct nvm_meta_bin_t {
+ u32 magic;
+#define NVM_META_BIN_MAGIC 0x669955bb
+ u32 version;
+#define NVM_META_BIN_VERSION 1
+ u32 num_options;
+ u32 options[0];
+};
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index b8c5641b29a8..5d725f59db24 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -26,12 +26,12 @@ static struct qed_ooo_archipelago
u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
struct qed_ooo_archipelago *p_archipelago;
- if (idx >= p_ooo_info->max_num_archipelagos)
+ if (unlikely(idx >= p_ooo_info->max_num_archipelagos))
return NULL;
p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
- if (list_empty(&p_archipelago->isles_list))
+ if (unlikely(list_empty(&p_archipelago->isles_list)))
return NULL;
return p_archipelago;
@@ -46,7 +46,7 @@ static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
u8 the_num_of_isle = 1;
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
- if (!p_archipelago) {
+ if (unlikely(!p_archipelago)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return NULL;
@@ -362,7 +362,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
if (ooo_isle > 1) {
p_prev_isle = qed_ooo_seek_isle(p_hwfn,
p_ooo_info, cid, ooo_isle - 1);
- if (!p_prev_isle) {
+ if (unlikely(!p_prev_isle)) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n",
ooo_isle - 1, cid);
@@ -370,7 +370,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
}
}
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
- if (!p_archipelago && (ooo_isle != 1)) {
+ if (unlikely(!p_archipelago && ooo_isle != 1)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return;
@@ -381,7 +381,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_isle, list_entry);
list_del(&p_isle->list_entry);
- if (!list_empty(&p_isle->buffers_list)) {
+ if (unlikely(!list_empty(&p_isle->buffers_list))) {
DP_NOTICE(p_hwfn, "Free isle is not empty\n");
INIT_LIST_HEAD(&p_isle->buffers_list);
}
@@ -418,13 +418,13 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_isle *p_isle = NULL;
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
- if (!p_isle) {
+ if (unlikely(!p_isle)) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n", ooo_isle, cid);
return;
}
- if (buffer_side == QED_OOO_LEFT_BUF)
+ if (unlikely(buffer_side == QED_OOO_LEFT_BUF))
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
else
list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
@@ -438,7 +438,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle + 1);
- if (!p_right_isle) {
+ if (unlikely(!p_right_isle)) {
DP_NOTICE(p_hwfn,
"Right isle %d is not found(cid %d)\n",
left_isle + 1, cid);
@@ -450,7 +450,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
if (left_isle) {
p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle);
- if (!p_left_isle) {
+ if (unlikely(!p_left_isle)) {
DP_NOTICE(p_hwfn,
"Left isle %d is not found(cid %d)\n",
left_isle, cid);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 4f4b79250a2b..7f3e84b8622d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -22,6 +22,7 @@
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -33,7 +34,6 @@
#include "qed_roce.h"
#include "qed_sp.h"
-
int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 max_count, char *name)
{
@@ -865,8 +865,8 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
}
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_COMMON_QUEUE_CONS, qz_num);
REG_WR16(p_hwfn, addr, prod);
@@ -1903,7 +1903,6 @@ void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}
-
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->db_bar_no_edpm = true;
@@ -1966,7 +1965,7 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
u8 *old_mac_address,
- u8 *new_mac_address)
+ const u8 *new_mac_address)
{
int rc = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 6a1de3a25257..2753723011dd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -168,16 +168,19 @@ static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
return false;
}
+
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
#else
-static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) {}
-static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
+ { return -EINVAL; }
static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index da1b7fdcbda7..6f1a52e6beb2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -126,6 +126,8 @@
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
+#define QM_REG_RLGLBLUPPERBOUND \
+ 0x2f3c00UL
#define TCFC_REG_WEAK_ENABLE_VF \
0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \
@@ -576,7 +578,7 @@
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
@@ -595,8 +597,8 @@
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
@@ -606,7 +608,10 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-
+#define QM_REG_WFQVPUPPERBOUND \
+ 0x2fb000UL
+#define QM_REG_WFQVPCRD \
+ 0x2fc000UL
#define PGLCS_REG_DBG_SELECT_K2_E5 \
0x001d14UL
#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
@@ -1437,29 +1442,29 @@
0x1401140UL
#define XSEM_REG_SYNC_DBG_EMPTY \
0x1401160UL
-#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define XSEM_REG_SLOW_DBG_ACTIVE \
0x1401400UL
-#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define XSEM_REG_SLOW_DBG_MODE \
0x1401404UL
-#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define XSEM_REG_DBG_FRAME_MODE \
0x1401408UL
#define XSEM_REG_DBG_GPRE_VECT \
0x1401410UL
-#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define XSEM_REG_DBG_MODE1_CFG \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
0x1440000UL
#define YSEM_REG_SYNC_DBG_EMPTY \
0x1501160UL
-#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define YSEM_REG_SLOW_DBG_ACTIVE \
0x1501400UL
-#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define YSEM_REG_SLOW_DBG_MODE \
0x1501404UL
-#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define YSEM_REG_DBG_FRAME_MODE \
0x1501408UL
#define YSEM_REG_DBG_GPRE_VECT \
0x1501410UL
-#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define YSEM_REG_DBG_MODE1_CFG \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
0x1540000UL
@@ -1467,15 +1472,15 @@
0x1601140UL
#define PSEM_REG_SYNC_DBG_EMPTY \
0x1601160UL
-#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define PSEM_REG_SLOW_DBG_ACTIVE \
0x1601400UL
-#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define PSEM_REG_SLOW_DBG_MODE \
0x1601404UL
-#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define PSEM_REG_DBG_FRAME_MODE \
0x1601408UL
#define PSEM_REG_DBG_GPRE_VECT \
0x1601410UL
-#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define PSEM_REG_DBG_MODE1_CFG \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
0x1640000UL
@@ -1483,15 +1488,15 @@
0x1701140UL
#define TSEM_REG_SYNC_DBG_EMPTY \
0x1701160UL
-#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define TSEM_REG_SLOW_DBG_ACTIVE \
0x1701400UL
-#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define TSEM_REG_SLOW_DBG_MODE \
0x1701404UL
-#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define TSEM_REG_DBG_FRAME_MODE \
0x1701408UL
#define TSEM_REG_DBG_GPRE_VECT \
0x1701410UL
-#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define TSEM_REG_DBG_MODE1_CFG \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
0x1740000UL
@@ -1499,15 +1504,15 @@
0x1801140UL
#define MSEM_REG_SYNC_DBG_EMPTY \
0x1801160UL
-#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define MSEM_REG_SLOW_DBG_ACTIVE \
0x1801400UL
-#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define MSEM_REG_SLOW_DBG_MODE \
0x1801404UL
-#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define MSEM_REG_DBG_FRAME_MODE \
0x1801408UL
#define MSEM_REG_DBG_GPRE_VECT \
0x1801410UL
-#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define MSEM_REG_DBG_MODE1_CFG \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
@@ -1517,21 +1522,21 @@
20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
-#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define USEM_REG_SLOW_DBG_ACTIVE \
0x1901400UL
-#define USEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define USEM_REG_SLOW_DBG_MODE \
0x1901404UL
-#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define USEM_REG_DBG_FRAME_MODE \
0x1901408UL
#define USEM_REG_DBG_GPRE_VECT \
0x1901410UL
-#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define USEM_REG_DBG_MODE1_CFG \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
0x000748UL
-#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
+#define SEM_FAST_REG_DBG_MODSRC_DISABLE \
0x00074cUL
#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
0x000750UL
@@ -1561,7 +1566,7 @@
0x341500UL
#define BRB_REG_BIG_RAM_DATA_SIZE \
64
-#define SEM_FAST_REG_STALL_0_BB_K2 \
+#define SEM_FAST_REG_STALL_0 \
0x000488UL
#define SEM_FAST_REG_STALLED \
0x000494UL
@@ -1619,35 +1624,35 @@
0x008c14UL
#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
0x0006c4UL
-#define MS_REG_MS_CMU_K2_E5 \
+#define MS_REG_MS_CMU_K2 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_REG_PHY0_K2_E5 \
+#define PHY_PCIE_REG_PHY0_K2 \
0x620000UL
-#define PHY_PCIE_REG_PHY1_K2_E5 \
+#define PHY_PCIE_REG_PHY1_K2 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index cf5baa5e59bc..071b4aeaddf2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -792,7 +792,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
-
/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
index e27dd9a4547e..7a3bd749e1e4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -6,47 +6,47 @@
#include <linux/types.h>
/**
- * @brief qed_selftest_memory - Perform memory test
+ * qed_selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_memory(struct qed_dev *cdev);
/**
- * @brief qed_selftest_interrupt - Perform interrupt test
+ * qed_selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_interrupt(struct qed_dev *cdev);
/**
- * @brief qed_selftest_register - Perform register test
+ * qed_selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_register(struct qed_dev *cdev);
/**
- * @brief qed_selftest_clock - Perform clock test
+ * qed_selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_clock(struct qed_dev *cdev);
/**
- * @brief qed_selftest_nvram - Perform nvram test
+ * qed_selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_nvram(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 60ff3222bf55..4fb02a5579ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -23,31 +23,26 @@ enum spq_mode {
};
struct qed_spq_comp_cb {
- void (*function)(struct qed_hwfn *,
- void *,
- union event_ring_data *,
+ void (*function)(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
u8 fw_return_code);
void *cookie;
};
/**
- * @brief qed_eth_cqe_completion - handles the completion of a
- * ramrod on the cqe ring
+ * qed_eth_cqe_completion(): handles the completion of a
+ * ramrod on the cqe ring.
*
- * @param p_hwfn
- * @param cqe
+ * @p_hwfn: HW device data.
+ * @cqe: CQE.
*
- * @return int
+ * Return: Int.
*/
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
-/**
- * @file
- *
- * QED Slow-hwfn queue interface
- */
-
+/* QED Slow-hwfn queue interface */
union ramrod_data {
struct pf_start_ramrod_data pf_start;
struct pf_update_ramrod_data pf_update;
@@ -58,7 +53,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
- struct rx_update_gft_filter_data rx_update_gft;
+ struct rx_update_gft_filter_ramrod_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -207,117 +202,128 @@ struct qed_spq {
};
/**
- * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that
- * Pends it to the future list.
+ * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that
+ * pends it to the future list.
*
- * @param p_hwfn
- * @param p_req
+ * @p_hwfn: HW device data.
+ * @p_ent: Ent.
+ * @fw_return_code: Return code from firmware.
*
- * @return int
+ * Return: Int.
*/
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *fw_return_code);
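For illustration only (not part of the patch): the flow these interfaces imply, with request initialization and error handling elided; a sketch, not code taken from the driver.

	/* Hypothetical sketch only */
	struct qed_spq_entry *p_ent = NULL;

	if (!qed_spq_get_entry(p_hwfn, &p_ent))		/* 0 on success */
		qed_spq_post(p_hwfn, p_ent, NULL);	/* NULL: FW code not needed */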
/**
- * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ.
+ * qed_spq_alloc(): Allocates & initializes the SPQ and EQ.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_spq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_setup - Reset the SPQ to its start state.
+ * qed_spq_setup(): Reset the SPQ to its start state.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_spq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_deallocate - Deallocates the given SPQ struct.
+ * qed_spq_free(): Deallocates the given SPQ struct.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_spq_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_get_entry - Obtain an entrry from the spq
- * free pool list.
- *
- *
+ * qed_spq_get_entry(): Obtain an entry from the spq
+ * free pool list.
*
- * @param p_hwfn
- * @param pp_ent
+ * @p_hwfn: HW device data.
+ * @pp_ent: PP ENT.
*
- * @return int
+ * Return: Int.
*/
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent);
/**
- * @brief qed_spq_return_entry - Return an entry to spq free
- * pool list
+ * qed_spq_return_entry(): Return an entry to spq free pool list.
*
- * @param p_hwfn
- * @param p_ent
+ * @p_hwfn: HW device data.
+ * @p_ent: P ENT.
+ *
+ * Return: Void.
*/
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
/**
- * @brief qed_eq_allocate - Allocates & initializes an EQ struct
+ * qed_eq_alloc(): Allocates & initializes an EQ struct.
*
- * @param p_hwfn
- * @param num_elem number of elements in the eq
+ * @p_hwfn: HW device data.
+ * @num_elem: number of elements in the eq.
*
- * @return int
+ * Return: Int.
*/
int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief qed_eq_setup - Reset the EQ to its start state.
+ * qed_eq_setup(): Reset the EQ to its start state.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_eq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_free - deallocates the given EQ struct.
+ * qed_eq_free(): deallocates the given EQ struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_eq_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_prod_update - update the FW with default EQ producer
+ * qed_eq_prod_update(): update the FW with default EQ producer.
+ *
+ * @p_hwfn: HW device data.
+ * @prod: Prod.
*
- * @param p_hwfn
- * @param prod
+ * Return: Void.
*/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
u16 prod);
/**
- * @brief qed_eq_completion - Completes currently pending EQ elements
+ * qed_eq_completion(): Completes currently pending EQ elements.
*
- * @param p_hwfn
- * @param cookie
+ * @p_hwfn: HW device data.
+ * @cookie: Cookie.
*
- * @return int
+ * Return: Int.
*/
int qed_eq_completion(struct qed_hwfn *p_hwfn,
void *cookie);
/**
- * @brief qed_spq_completion - Completes a single event
+ * qed_spq_completion(): Completes a single event.
*
- * @param p_hwfn
- * @param echo - echo value from cookie (used for determining completion)
- * @param p_data - data from cookie (used in callback function if applicable)
+ * @p_hwfn: HW device data.
+ * @echo: echo value from cookie (used for determining completion).
+ * @fw_return_code: FW return code.
+ * @p_data: data from cookie (used in callback function if applicable).
*
- * @return int
+ * Return: Int.
*/
int qed_spq_completion(struct qed_hwfn *p_hwfn,
__le16 echo,
@@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
union event_ring_data *p_data);
/**
- * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u32 - SPQ CID
+ * Return: u32 - SPQ CID.
*/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_alloc - Allocates & initializes an ConsQ
- * struct
+ * qed_consq_alloc(): Allocates & initializes a ConsQ struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_consq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_setup - Reset the ConsQ to its start state.
+ * qed_consq_setup(): Reset the ConsQ to its start state.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_consq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_free - deallocates the given ConsQ struct.
+ * qed_consq_free(): deallocates the given ConsQ struct.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_consq_free(struct qed_hwfn *p_hwfn);
int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
-/**
- * @file
- *
- * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
- */
+/* Slow-hwfn low-level commands (Ramrods) function definitions. */
#define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02
@@ -377,12 +382,15 @@ struct qed_sp_init_data {
};
/**
- * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
- * Should be called on in error flows after initializing the SPQ entry
- * and before posting it.
+ * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the
+ * entry if allocated. Should be called in error
+ * flows after initializing the SPQ entry
+ * and before posting it.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ent: Ent.
*
- * @param p_hwfn
- * @param p_ent
+ * Return: Void.
*/
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
@@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data *p_data);
/**
- * @brief qed_sp_pf_start - PF Function Start Ramrod
+ * qed_sp_pf_start(): PF Function Start Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_tunn: P_tunn.
+ * @allow_npar_tx_switch: Allow NPAR TX Switch.
+ *
+ * Return: Int.
*
* This ramrod is sent to initialize a physical function (PF). It will
* configure the function related parameters and write its completion to the
@@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* allocated by the driver on host memory and its parameters are written
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_tunn
- * @param allow_npar_tx_switch
- *
- * @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
@@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
bool allow_npar_tx_switch);
/**
- * @brief qed_sp_pf_update - PF Function Update Ramrod
+ * qed_sp_pf_update(): PF Function Update Ramrod.
*
- * This ramrod updates function-related parameters. Every parameter can be
- * updated independently, according to configuration flags.
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Int.
*
- * @return int
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
*/
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
+ * qed_sp_pf_update_stag(): Update firmware of new outer tag.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_sp_pf_stop - PF Function Stop Ramrod
- *
- * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
- * sent and the last completion written to the PFs Event Ring. This ramrod also
- * deletes the context for the Slowhwfn connection on this PF.
- *
- * @note Not required for first packet.
- *
- * @param p_hwfn
- *
- * @return int
- */
-
-/**
- * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
+ * qed_sp_pf_update_ufp(): PF ufp update Ramrod.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
@@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ * qed_sp_heartbeat_ramrod(): Send empty Ramrod.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index b4ed54ffef9b..648176dfb871 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
- DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+
+ /* Place consolidation queue address in ramrod */
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
+ p_ramrod->consolid_q_num_pages = page_cnt;
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
@@ -401,8 +405,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
- p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
- p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+ p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_iov->total_vfs;
}
p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 0bc1a0aeb56e..e0473729b161 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -20,6 +20,7 @@
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
@@ -31,8 +32,8 @@
#include "qed_rdma.h"
/***************************************************************************
-* Structures & Definitions
-***************************************************************************/
+ * Structures & Definitions
+ ***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
@@ -42,8 +43,8 @@
#define SPQ_BLOCK_SLEEP_MS (5)
/***************************************************************************
-* Blocking Imp. (BLOCK/EBLOCK mode)
-***************************************************************************/
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
union event_ring_data *data, u8 fw_return_code)
@@ -149,8 +150,8 @@ err:
}
/***************************************************************************
-* SPQ entries inner API
-***************************************************************************/
+ * SPQ entries inner API
+ ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
@@ -184,12 +185,12 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* HSI access
-***************************************************************************/
+ * HSI access
+ ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- struct e4_core_conn_context *p_cxt;
+ struct core_conn_context *p_cxt;
struct qed_cxt_info cxt_info;
u16 physical_q;
int rc;
@@ -207,23 +208,20 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
- p_cxt->xstorm_st_context.spq_base_lo =
+ p_cxt->xstorm_st_context.spq_base_addr.lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.spq_base_hi =
+ p_cxt->xstorm_st_context.spq_base_addr.hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
-
- DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
- p_hwfn->p_consq->chain.p_phys_addr);
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
@@ -265,8 +263,8 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Asynchronous events
-***************************************************************************/
+ * Asynchronous events
+ ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
@@ -311,12 +309,12 @@ qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* EQ API
-***************************************************************************/
+ * EQ API
+ ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
- u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+ u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_EQE_CONS, p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
}
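GET_GTT_REG_ADDR comes from the newly included qed_iro_hsi.h and replaces open-coded base-plus-offset sums such as the removed GTT_BAR0_MAP_REG_USDM_RAM + USTORM_EQE_CONS_OFFSET(...). The exact definition is not shown in this diff; a hypothetical sketch of the shape such a macro takes:

    /* Hypothetical sketch, not the qed_iro_hsi.h definition: paste the IRO
     * symbol name onto its _OFFSET() helper and add the GTT base.
     */
    #define GET_GTT_REG_ADDR(base, name, ...) \
            ((base) + name##_OFFSET(__VA_ARGS__))

    /* The call above would then expand to roughly:
     *   GTT_BAR0_MAP_REG_USDM_RAM + USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id)
     */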
@@ -433,8 +431,8 @@ void qed_eq_free(struct qed_hwfn *p_hwfn)
}
/***************************************************************************
-* CQE API - manipulate EQ functionality
-***************************************************************************/
+ * CQE API - manipulate EQ functionality
+ ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
@@ -464,8 +462,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Slow hwfn Queue (spq)
-***************************************************************************/
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
@@ -548,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
int ret;
/* SPQ struct */
- p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+ p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
if (!p_spq)
return -ENOMEM;
@@ -676,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) {
-
if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
@@ -725,8 +722,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Accessor
-***************************************************************************/
+ * Accessor
+ ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
if (!p_hwfn->p_spq)
@@ -735,8 +732,8 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
}
/***************************************************************************
-* Posting new Ramrods
-***************************************************************************/
+ * Posting new Ramrods
+ ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
struct list_head *head, u32 keep_reserve)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ed2b6fe5a78d..8ac38828ba45 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -11,6 +11,7 @@
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -19,12 +20,13 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
+static u16 qed_vf_from_entity_id(__le16 entity_id)
+{
+ return le16_to_cpu(entity_id) - MAX_NUM_PFS;
+}
+
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
u8 legacy = 0;
@@ -169,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
- DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
- relative_vf_id);
+ DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
+ __func__, relative_vf_id);
return vf;
}
@@ -308,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
struct qed_dmae_params params;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return -EINVAL;
@@ -420,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
bulletin_p = p_iov_info->bulletins_phys;
if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
DP_ERR(p_hwfn,
- "qed_iov_setup_vfdb called without allocating mem first\n");
+ "%s called without allocating mem first\n", __func__);
return;
}
@@ -464,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+ "%s for %d VFs\n", __func__, num_vfs);
/* Allocate PF Mailbox buffer (per-VF) */
p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
@@ -500,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
QED_MSG_IOV,
"PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
p_iov_info->mbx_msg_virt_addr,
- (u64) p_iov_info->mbx_msg_phys_addr,
+ (u64)p_iov_info->mbx_msg_phys_addr,
p_iov_info->mbx_reply_virt_addr,
- (u64) p_iov_info->mbx_reply_phys_addr,
- p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+ (u64)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
return 0;
}
@@ -608,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
if (rc)
return rc;
- /* We want PF IOV to be synonemous with the existance of p_iov_info;
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
* In case the capability is published but there are no VFs, simply
* de-allocate the struct.
*/
@@ -714,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
int i;
/* Set VF masks and configuration - pretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
/* iterate over all queues, clear sb consumer */
for (i = 0; i < vf->num_sbs; i++)
@@ -734,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
{
u32 igu_vf_conf;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
@@ -746,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static int
@@ -807,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
@@ -816,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.hw_mode);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
vf->state = VF_FREE;
@@ -904,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
p_block->igu_sb_id * sizeof(u64), 2, NULL);
}
- vf->num_sbs = (u8) num_rx_queues;
+ vf->num_sbs = (u8)num_rx_queues;
return vf->num_sbs;
}
@@ -988,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
@@ -1092,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
@@ -1220,8 +1222,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
* channel would be re-set to ready prior to that.
*/
REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1);
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
@@ -1545,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp));
/* Write the PF version so that VF would know which version
- * is supported - might be later overriden. This guarantees that
+ * is supported - might be later overridden. This guarantees that
* VF could recognize legacy PF based on lack of versions in reply.
*/
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
@@ -1603,7 +1605,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
- pfdev_info->indices_per_sb = PIS_PER_SB_E4;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -1897,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
int sb_id;
int rc;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Failed to get VF info, invalid vfid [%d]\n",
@@ -1957,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
DP_ERR(p_hwfn,
- "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ "%s returned error %d\n", __func__, rc);
status = PFVF_STATUS_FAILURE;
} else {
vf->vport_instance++;
@@ -1993,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
- DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
- rc);
+ DP_ERR(p_hwfn, "%s returned error %d\n",
+ __func__, rc);
status = PFVF_STATUS_FAILURE;
}
@@ -2138,10 +2140,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
* calculate on their own and clean the producer prior to this.
*/
if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
- 0);
+ qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY +
+ SEM_FAST_REG_INT_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
+ req->rx_qid), 0);
rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
req->bd_max_bytes,
@@ -3030,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
goto out;
}
p_rss_params = vzalloc(sizeof(*p_rss_params));
- if (p_rss_params == NULL) {
+ if (!p_rss_params) {
status = PFVF_STATUS_FAILURE;
goto out;
}
@@ -3550,6 +3552,7 @@ out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
sizeof(struct pfvf_def_resp_tlv), status);
}
+
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
@@ -3557,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
int cnt;
u32 val;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
for (cnt = 0; cnt < 50; cnt++) {
val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
@@ -3565,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
break;
msleep(20);
}
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
if (cnt == 50) {
DP_ERR(p_hwfn,
@@ -3577,48 +3580,73 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
return 0;
}
+#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
+
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
- int i, cnt;
+ u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
+ u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
+ u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
+ u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
+ u8 port_id, tc, tc_id = 0, voq = 0;
+ int cnt;
- /* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
- u32 prod;
+ memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
+ memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
- cons[i] = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- prod = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
- i * 0x40);
- distance[i] = prod - cons[i];
+ /* Read initial consumers & producers */
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC;
+ voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
+ cons[voq] = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ prod_voq0_addr + voq * 0x40);
+ distance[voq] = prod - cons[voq];
+ }
}
/* Wait for consumers to pass the producers */
- i = 0;
+ port_id = 0;
+ tc = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS_E4; i++) {
- u32 tmp;
+ for (; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc : PURE_LB_TC;
+ voq = VOQ(port_id,
+ tc_id, max_phys_tcs_per_port);
+ tmp = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ if (distance[voq] > tmp - cons[voq])
+ break;
+ }
- tmp = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- if (distance[i] > tmp - cons[i])
+ if (tc == max_phys_tcs_per_port + 1)
+ tc = 0;
+ else
break;
}
- if (i == MAX_NUM_VOQS_E4)
+ if (port_id == max_ports_per_engine)
break;
msleep(20);
}
if (cnt == 50) {
- DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
- p_vf->abs_vf_id, i);
+ DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n",
+ p_vf->abs_vf_id, (int)voq);
+
+ DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n",
+ (int)voq, (int)port_id, (int)tc_id);
+
return -EBUSY;
}
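The rework above generalizes the PBF drain poll from a flat MAX_NUM_VOQS_E4 loop to a walk over ports and traffic classes, but the underlying pattern is unchanged: snapshot each queue's producer/consumer distance, then poll until every consumer has advanced by at least that distance. A self-contained sketch of that pattern (read_cons()/read_prod() stand in for the PBF register reads and are not real qed helpers):

    /* Drain-poll sketch; assumes nq <= NQ_MAX queues. */
    #define NQ_MAX 16

    static int drain_poll(unsigned int nq,
                          u32 (*read_cons)(unsigned int),
                          u32 (*read_prod)(unsigned int))
    {
            u32 cons[NQ_MAX], dist[NQ_MAX];
            unsigned int q;
            int cnt;

            for (q = 0; q < nq; q++) {
                    cons[q] = read_cons(q);
                    dist[q] = read_prod(q) - cons[q];       /* work in flight */
            }

            for (cnt = 0; cnt < 50; cnt++) {
                    for (q = 0; q < nq; q++)
                            if (dist[q] > read_cons(q) - cons[q])
                                    break;  /* this queue is still draining */
                    if (q == nq)
                            return 0;       /* all queues drained */
                    msleep(20);
            }
            return -EBUSY;
    }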
@@ -3680,8 +3708,8 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
* doesn't do that as a part of FLR.
*/
REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, vfid), 1);
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
@@ -3842,7 +3870,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return;
@@ -3979,7 +4007,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
u16 abs_vfid)
{
- u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn,
@@ -3989,7 +4017,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
return NULL;
}
- return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
@@ -4013,13 +4041,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
- struct malicious_vf_eqe_data *p_data)
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
{
struct qed_vf_info *p_vf;
- p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
-
+ p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+ qed_vf_from_entity_id(p_data->entity_id));
if (!p_vf)
return;
@@ -4036,16 +4064,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
}
}
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
- union event_ring_data *data, u8 fw_return_code)
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
&data->vf_pf_channel.msg_addr);
- case COMMON_EVENT_MALICIOUS_VF:
- qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
- return 0;
default:
DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
opcode);
@@ -4075,7 +4100,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
struct qed_dmae_params params;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return -EINVAL;
@@ -4176,7 +4201,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf_info;
u64 feature;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
@@ -4226,7 +4251,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return false;
@@ -4237,7 +4262,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return true;
@@ -4248,7 +4273,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return false;
@@ -4266,7 +4291,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
goto out;
}
- vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf)
goto out;
@@ -4345,7 +4370,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return rc;
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
- return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
+ QM_RL_TYPE_NORMAL);
}
static int
@@ -4376,7 +4402,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
struct qed_wfq_data *vf_vp_wfq;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return 0;
@@ -4395,8 +4421,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
*/
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(flag, &hwfn->iov_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
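The comments added above document a standard kernel idiom: set_bit() is atomic but implies no ordering, so smp_mb__before_atomic()/smp_mb__after_atomic() ensure earlier writes are visible before the flag is set, and the flag is visible before the work is kicked. A minimal sketch of the producer side:

    /* Flag-then-kick pattern, as in qed_schedule_iov(). */
    static void schedule_flag(unsigned long *flags, int flag,
                              struct workqueue_struct *wq,
                              struct delayed_work *work)
    {
            smp_mb__before_atomic();        /* order prior writes before the flag */
            set_bit(flag, flags);
            smp_mb__after_atomic();         /* flag visible before the kick */
            queue_delayed_work(wq, work, 0);
    }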
@@ -4407,8 +4435,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev)
int i;
for_each_hwfn(cdev, i)
- queue_delayed_work(cdev->hwfns[i].iov_wq,
- &cdev->hwfns[i].iov_task, 0);
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
@@ -4416,8 +4444,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
int i, j;
for_each_hwfn(cdev, i)
- if (cdev->hwfns[i].iov_wq)
- flush_workqueue(cdev->hwfns[i].iov_wq);
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
/* Mark VFs for disablement */
qed_iov_set_vfs_to_disable(cdev, true);
@@ -5010,7 +5038,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
}
qed_for_each_vf(hwfn, i)
- qed_iov_post_vf_bulletin(hwfn, i, ptt);
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
qed_ptt_release(hwfn, ptt);
}
@@ -5196,7 +5224,6 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
}
- flush_workqueue(cdev->hwfns[i].iov_wq);
destroy_workqueue(cdev->hwfns[i].iov_wq);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index eacd6457f195..f448e3dd6c8b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -142,7 +142,7 @@ struct qed_vf_queue {
enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */
- VF_ACQUIRED, /* VF, acquired, but not initalized */
+ VF_ACQUIRED, /* VF, acquired, but not initialized */
VF_ENABLED, /* VF, Enabled */
VF_RESET, /* VF, FLR'd, pending cleanup */
VF_STOPPED /* VF, Stopped */
@@ -250,29 +250,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#ifdef CONFIG_QED_SRIOV
/**
- * @brief Check if given VF ID @vfid is valid
- * w.r.t. @b_enabled_only value
- * if b_enabled_only = true - only enabled VF id is valid
- * else any VF id less than max_vfs is valid
+ * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid
+ * w.r.t. @b_enabled_only value:
+ * if b_enabled_only = true, only an enabled
+ * VF ID is valid;
+ * else any VF ID less than max_vfs is valid.
*
- * @param p_hwfn
- * @param rel_vf_id - Relative VF ID
- * @param b_enabled_only - consider only enabled VF
- * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: Relative VF ID.
+ * @b_enabled_only: consider only enabled VF.
+ * @b_non_malicious: true iff we want to validate vf isn't malicious.
*
- * @return bool - true for valid VF ID
+ * Return: bool - true for valid VF ID
*/
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only, bool b_non_malicious);
/**
- * @brief - Given a VF index, return index of next [including that] active VF.
+ * qed_iov_get_next_active_vf(): Given a VF index, return index of
+ * next [including that] active VF.
*
- * @param p_hwfn
- * @param rel_vf_id
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: VF ID.
*
- * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * Return: MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
@@ -280,83 +282,117 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
int vfid, u16 vxlan_port, u16 geneve_port);
/**
- * @brief Read sriov related information and allocated resources
- * reads from configuration space, shmem, etc.
+ * qed_iov_hw_info(): Read sriov related information and allocated resources
+ * reads from configuration space, shmem, etc.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ * qed_add_tlv(): place a given tlv on the tlv buffer at next offset
*
- * @param p_hwfn
- * @param p_iov
- * @param type
- * @param length
+ * @p_hwfn: HW device data.
+ * @offset: Offset.
+ * @type: Type.
+ * @length: Length.
*
- * @return pointer to the newly placed tlv
+ * Return: pointer to the newly placed tlv
*/
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
/**
- * @brief list the types and lengths of the tlvs on the buffer
+ * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer
*
- * @param p_hwfn
- * @param tlvs_list
+ * @p_hwfn: HW device data.
+ * @tlvs_list: Tlvs_list.
+ *
+ * Return: Void.
*/
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
/**
- * @brief qed_iov_alloc - allocate sriov related resources
+ * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_data: Pointer to data.
+ *
+ * Return: Void.
+ */
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data);
+
+/**
+ * qed_sriov_eqe_event(): Callback for SRIOV events.
+ *
+ * @p_hwfn: HW device data.
+ * @opcode: Opcode.
+ * @echo: Echo.
+ * @data: data
+ * @fw_return_code: FW return code.
+ *
+ * Return: Int.
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code);
+
+/**
+ * qed_iov_alloc(): allocate sriov related resources
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_iov_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_iov_setup - setup sriov related resources
+ * qed_iov_setup(): setup sriov related resources
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_iov_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_iov_free - free sriov related resources
+ * qed_iov_free(): free sriov related resources
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_iov_free(struct qed_hwfn *p_hwfn);
/**
- * @brief free sriov related memory that was allocated during hw_prepare
+ * qed_iov_free_hw_info(): free sriov related memory that was
+ * allocated during hw_prepare
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
- * @brief Mark structs of vfs that have been FLR-ed.
+ * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed.
*
- * @param p_hwfn
- * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ * @p_hwfn: HW device data.
+ * @disabled_vfs: bitmask of all VFs on path that were FLRed
*
- * @return true iff one of the PF's vfs got FLRed. false otherwise.
+ * Return: true iff one of the PF's vfs got FLRed. false otherwise.
*/
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
- * @brief Search extended TLVs in request/reply buffer.
+ * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer.
*
- * @param p_hwfn
- * @param p_tlvs_list - Pointer to tlvs list
- * @param req_type - Type of TLV
+ * @p_hwfn: HW device data.
+ * @p_tlvs_list: Pointer to tlvs list
+ * @req_type: Type of TLV
*
- * @return pointer to tlv type if found, otherwise returns NULL.
+ * Return: pointer to tlv type if found, otherwise returns NULL.
*/
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
@@ -442,6 +478,18 @@ static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
+
+static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ return 0;
+}
#endif
#define qed_for_each_vf(_p_hwfn, _i) \
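The new static inline stubs above follow the usual Kconfig-gating pattern: when CONFIG_QED_SRIOV is disabled, callers still compile against no-op versions of the same interface. The general shape (CONFIG_FOO and foo_* are illustrative names, not qed API):

    #ifdef CONFIG_FOO
    int foo_eqe_event(struct foo_hwfn *p_hwfn, u8 opcode);
    #else
    static inline int foo_eqe_event(struct foo_hwfn *p_hwfn, u8 opcode)
    {
            return 0;       /* feature compiled out; nothing to do */
    }
    #endif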
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72a38d53d33f..597cd9cd57b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -27,7 +27,7 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
"preparing to send 0x%04x tlv over vf pf channel\n",
type);
- /* Reset Requst offset */
+ /* Reset Request offset */
p_iov->offset = (u8 *)p_iov->vf2pf_request;
/* Clear mailbox - both request and reply */
@@ -444,7 +444,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
u32 reg;
int rc;
- /* Set number of hwfns - might be overriden once leading hwfn learns
+ /* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
*/
if (IS_LEAD_HWFN(p_hwfn))
@@ -504,7 +504,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
QED_MSG_IOV,
"VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
p_iov->vf2pf_request,
- (u64) p_iov->vf2pf_request_phys,
+ (u64)p_iov->vf2pf_request_phys,
p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
/* Allocate Bulletin board */
@@ -561,6 +561,7 @@ free_p_iov:
return -ENOMEM;
}
+
#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
@@ -1285,8 +1286,8 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
- req->opcode = (u8) p_ucast->opcode;
- req->type = (u8) p_ucast->type;
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
memcpy(req->mac, p_ucast->mac, ETH_ALEN);
req->vlan = p_ucast->vlan;
@@ -1372,7 +1373,7 @@ exit:
int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
- u8 *p_mac)
+ const u8 *p_mac)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_bulletin_update_mac_tlv *p_req;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 60d2bb64e65f..306b5f4bc632 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -48,7 +48,7 @@ struct channel_tlv {
u16 length;
};
-/* header of first vf->pf tlv carries the offset used to calculate reponse
+/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
@@ -85,8 +85,8 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
-#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */
+#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */
/* A requirement for supporting multi-Tx queues on a single queue-zone,
* VF would pass qids as additional information whenever passing queue
* references.
@@ -688,13 +688,16 @@ struct qed_vf_iov {
};
/**
- * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
- * Coalesce value '0' will omit the configuration.
+ * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the
+ * configuration.
*
- * @param p_hwfn
- * @param rx_coal - coalesce value in micro second for rx queue
- * @param tx_coal - coalesce value in micro second for tx queue
- * @param p_cid - queue cid
+ * @p_hwfn: HW device data.
+ * @rx_coal: coalesce value in micro second for rx queue.
+ * @tx_coal: coalesce value in micro second for tx queue.
+ * @p_cid: queue cid.
+ *
+ * Return: Int.
*
**/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
@@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 tx_coal, struct qed_queue_cid *p_cid);
/**
- * @brief VF - Get coalesce per VF's relative queue.
+ * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue.
*
- * @param p_hwfn
- * @param p_coal - coalesce value in micro second for VF queues.
- * @param p_cid - queue cid
+ * @p_hwfn: HW device data.
+ * @p_coal: coalesce value in micro second for VF queues.
+ * @p_cid: queue cid.
*
+ * Return: Int.
**/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
u16 *p_coal, struct qed_queue_cid *p_cid);
#ifdef CONFIG_QED_SRIOV
/**
- * @brief Read the VF bulletin and act on it if needed
+ * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed.
*
- * @param p_hwfn
- * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
+ * @p_hwfn: HW device data.
+ * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise.
*
- * @return enum _qed_status
+ * Return: enum _qed_status.
*/
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
/**
- * @brief Get link paramters for VF from qed
+ * qed_vf_get_link_params(): Get link parameters for VF from qed
+ *
+ * @p_hwfn: HW device data.
+ * @params: the link params structure to be filled for the VF.
*
- * @param p_hwfn
- * @param params - the link params structure to be filled for the VF
+ * Return: Void.
*/
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params);
/**
- * @brief Get link state for VF from qed
+ * qed_vf_get_link_state(): Get link state for VF from qed.
+ *
+ * @p_hwfn: HW device data.
+ * @link: the link state structure to be filled for the VF
*
- * @param p_hwfn
- * @param link - the link state structure to be filled for the VF
+ * Return: Void.
*/
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link);
/**
- * @brief Get link capabilities for VF from qed
+ * qed_vf_get_link_caps(): Get link capabilities for VF from qed.
*
- * @param p_hwfn
- * @param p_link_caps - the link capabilities structure to be filled for the VF
+ * @p_hwfn: HW device data.
+ * @p_link_caps: the link capabilities structure to be filled for the VF
+ *
+ * Return: Void.
*/
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps);
/**
- * @brief Get number of Rx queues allocated for VF by qed
+ * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed
+ *
+ * @p_hwfn: HW device data.
+ * @num_rxqs: allocated RX queues
*
- * @param p_hwfn
- * @param num_rxqs - allocated RX queues
+ * Return: Void.
*/
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
- * @brief Get number of Rx queues allocated for VF by qed
+ * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed
*
- * @param p_hwfn
- * @param num_txqs - allocated RX queues
+ * @p_hwfn: HW device data.
+ * @num_txqs: allocated TX queues
+ *
+ * Return: Void.
*/
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
/**
- * @brief Get number of available connections [both Rx and Tx] for VF
+ * qed_vf_get_num_cids(): Get number of available connections
+ * [both Rx and Tx] for VF
+ *
+ * @p_hwfn: HW device data.
+ * @num_cids: allocated number of connections
*
- * @param p_hwfn
- * @param num_cids - allocated number of connections
+ * Return: Void.
*/
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
/**
- * @brief Get port mac address for VF
+ * qed_vf_get_port_mac(): Get port mac address for VF.
*
- * @param p_hwfn
- * @param port_mac - destination location for port mac
+ * @p_hwfn: HW device data.
+ * @port_mac: destination location for port mac
+ *
+ * Return: Void.
*/
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
/**
- * @brief Get number of VLAN filters allocated for VF by qed
+ * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated
+ * for VF by qed.
+ *
+ * @p_hwfn: HW device data.
+ * @num_vlan_filters: allocated VLAN filters
*
- * @param p_hwfn
- * @param num_rxqs - allocated VLAN filters
+ * Return: Void.
*/
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
- * @brief Get number of MAC filters allocated for VF by qed
+ * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated
+ * for VF by qed
*
- * @param p_hwfn
- * @param num_rxqs - allocated MAC filters
+ * @p_hwfn: HW device data.
+ * @num_mac_filters: allocated MAC filters
+ *
+ * Return: Void.
*/
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
/**
- * @brief Check if VF can set a MAC address
+ * qed_vf_check_mac(): Check if VF can set a MAC address
*
- * @param p_hwfn
- * @param mac
+ * @p_hwfn: HW device data.
+ * @mac: Mac.
*
- * @return bool
+ * Return: bool.
*/
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
/**
- * @brief Set firmware version information in dev_info from VFs acquire response tlv
+ * qed_vf_get_fw_version(): Set firmware version information
+ * in dev_info from VFs acquire response tlv
+ *
+ * @p_hwfn: HW device data.
+ * @fw_major: FW major.
+ * @fw_minor: FW minor.
+ * @fw_rev: FW rev.
+ * @fw_eng: FW eng.
*
- * @param p_hwfn
- * @param fw_major
- * @param fw_minor
- * @param fw_rev
- * @param fw_eng
+ * Return: Void.
*/
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng);
/**
- * @brief hw preparation for VF
- * sends ACQUIRE message
+ * qed_vf_hw_prepare(): hw preparation for VF sends ACQUIRE message
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
- * @brief VF - start the RX Queue by sending a message to the PF
- * @param p_hwfn
- * @param p_cid - Only relative fields are relevant
- * @param bd_max_bytes - maximum number of bytes per bd
- * @param bd_chain_phys_addr - physical address of bd chain
- * @param cqe_pbl_addr - physical address of pbl
- * @param cqe_pbl_size - pbl size
- * @param pp_prod - pointer to the producer to be
- * used in fastpath
+ * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: Only relative fields are relevant
+ * @bd_max_bytes: maximum number of bytes per bd
+ * @bd_chain_phys_addr: physical address of bd chain
+ * @cqe_pbl_addr: physical address of pbl
+ * @cqe_pbl_size: pbl size
+ * @pp_prod: pointer to the producer to be used in fastpath
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
@@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size, void __iomem **pp_prod);
/**
- * @brief VF - start the TX queue by sending a message to the
- * PF.
+ * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the
+ * PF.
*
- * @param p_hwfn
- * @param tx_queue_id - zero based within the VF
- * @param sb - status block for this queue
- * @param sb_index - index within the status block
- * @param bd_chain_phys_addr - physical address of tx chain
- * @param pp_doorbell - pointer to address to which to
- * write the doorbell too..
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL size.
+ * @pp_doorbell: pointer to address to which to write the doorbell.
*
- * @return int
+ * Return: Int.
*/
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
@@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 pbl_size, void __iomem **pp_doorbell);
/**
- * @brief VF - stop the RX queue by sending a message to the PF
+ * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF.
*
- * @param p_hwfn
- * @param p_cid
- * @param cqe_completion
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @cqe_completion: CQE Completion.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid, bool cqe_completion);
/**
- * @brief VF - stop the TX queue by sending a message to the PF
+ * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF.
*
- * @param p_hwfn
- * @param tx_qid
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
/**
- * @brief VF - send a vport update command
+ * qed_vf_pf_vport_update(): VF - send a vport update command.
*
- * @param p_hwfn
- * @param params
+ * @p_hwfn: HW device data.
+ * @p_params: Params.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params);
/**
+ * qed_vf_pf_reset(): VF - send a close message to PF.
*
- * @brief VF - send a close message to PF
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
- *
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
/**
- * @brief VF - free vf`s memories
+ * qed_vf_pf_release(): VF - free vf`s memories.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given
* sb_id. For VFs igu sbs don't have to be contiguous
*
- * @param p_hwfn
- * @param sb_id
+ * @p_hwfn: HW device data.
+ * @sb_id: SB ID.
*
- * @return INLINE u16
+ * Return: u16.
*/
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
- * @brief Stores [or removes] a configured sb_info.
+ * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info.
+ *
+ * @p_hwfn: HW device data.
+ * @sb_id: zero-based SB index [for fastpath]
+ * @p_sb: may be NULL [during removal].
*
- * @param p_hwfn
- * @param sb_id - zero-based SB index [for fastpath]
- * @param sb_info - may be NULL [during removal].
+ * Return: Void.
*/
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
u16 sb_id, struct qed_sb_info *p_sb);
/**
- * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ * qed_vf_pf_vport_start(): perform vport start for VF.
*
- * @param p_hwfn
- * @param vport_id
- * @param mtu
- * @param inner_vlan_removal
- * @param tpa_mode
- * @param max_buffers_per_cqe,
- * @param only_untagged - default behavior regarding vlan acceptance
+ * @p_hwfn: HW device data.
+ * @vport_id: Vport ID.
+ * @mtu: MTU.
+ * @inner_vlan_removal: Inner VLAN removal.
+ * @tpa_mode: TPA mode.
+ * @max_buffers_per_cqe: Max buffers per CQE.
+ * @only_untagged: default behavior regarding vlan acceptance
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
@@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 max_buffers_per_cqe, u8 only_untagged);
/**
- * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ * qed_vf_pf_vport_stop(): stop the VF's vport
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
@@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd);
/**
- * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ * qed_vf_pf_int_cleanup(): clean the SB of the VF
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link params in a given bulletin board
+ * __qed_vf_get_link_params(): return the link params in a given bulletin board
*
- * @param p_hwfn
- * @param p_params - pointer to a struct to fill with link params
- * @param p_bulletin
+ * @p_hwfn: HW device data.
+ * @p_params: pointer to a struct to fill with link params
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
*/
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *p_params,
struct qed_bulletin_content *p_bulletin);
/**
- * @brief - return the link state in a given bulletin board
+ * __qed_vf_get_link_state(): return the link state in a given bulletin board
+ *
+ * @p_hwfn: HW device data.
+ * @p_link: pointer to a struct to fill with link state
+ * @p_bulletin: Bulletin.
*
- * @param p_hwfn
- * @param p_link - pointer to a struct to fill with link state
- * @param p_bulletin
+ * Return: Void.
*/
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content *p_bulletin);
/**
- * @brief - return the link capabilities in a given bulletin board
+ * __qed_vf_get_link_caps(): return the link capabilities in a given
+ * bulletin board
*
- * @param p_hwfn
- * @param p_link - pointer to a struct to fill with link capabilities
- * @param p_bulletin
+ * @p_hwfn: HW device data.
+ * @p_link_caps: pointer to a struct to fill with link capabilities
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
*/
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
@@ -1029,11 +1062,15 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
/**
- * @brief - Ask PF to update the MAC address in it's bulletin board
+ * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in
+ * its bulletin board.
+ *
+ * @p_hwfn: HW device data.
+ * @p_mac: mac address to be updated in bulletin board
*
- * @param p_mac - mac address to be updated in bulletin board
+ * Return: Int.
*/
-int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);
+int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
@@ -1222,7 +1259,7 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
}
static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
- u8 *p_mac)
+ const u8 *p_mac)
{
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a2e4dfb5cb44..3010833ddde3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -557,7 +557,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
return;
}
- ether_addr_copy(edev->ndev->dev_addr, mac);
+ eth_hw_addr_set(edev->ndev, mac);
__qede_unlock(edev);
}
@@ -617,32 +617,30 @@ void qede_fill_rss_params(struct qede_dev *edev,
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
- unsigned char mac[ETH_ALEN])
+ const unsigned char mac[ETH_ALEN])
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_ucast_params ucast;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.mac_valid = 1;
- ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.mac_valid = 1;
+ ether_addr_copy(ucast.mac, mac);
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}
static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
u16 vid)
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_ucast_params ucast;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.vlan_valid = 1;
- filter_cmd.filter.ucast.vlan = vid;
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.vlan_valid = 1;
+ ucast.vlan = vid;
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
@@ -1057,18 +1055,17 @@ static int qede_set_mcast_rx_mac(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
unsigned char *mac, int num_macs)
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_mcast_params mcast;
int i;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_MCAST;
- filter_cmd.filter.mcast.type = opcode;
- filter_cmd.filter.mcast.num = num_macs;
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.type = opcode;
+ mcast.num = num_macs;
for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
- ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+ ether_addr_copy(mcast.mac[i], mac);
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_mcast(edev->cdev, &mcast);
}
int qede_set_mac_addr(struct net_device *ndev, void *p)
@@ -1104,7 +1101,7 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
goto out;
}
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
if (edev->state != QEDE_STATE_OPEN) {
@@ -1194,7 +1191,6 @@ void qede_config_rx_mode(struct net_device *ndev)
{
enum qed_filter_rx_mode_type accept_flags;
struct qede_dev *edev = netdev_priv(ndev);
- struct qed_filter_params rx_mode;
unsigned char *uc_macs, *temp;
struct netdev_hw_addr *ha;
int rc, uc_count;
@@ -1220,10 +1216,6 @@ void qede_config_rx_mode(struct net_device *ndev)
netif_addr_unlock_bh(ndev);
- /* Configure the struct for the Rx mode */
- memset(&rx_mode, 0, sizeof(struct qed_filter_params));
- rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
/* Remove all previous unicast secondary macs and multicast macs
* (configure / leave the primary mac)
*/
@@ -1271,8 +1263,7 @@ void qede_config_rx_mode(struct net_device *ndev)
qede_config_accept_any_vlan(edev, false);
}
- rx_mode.filter.accept_flags = accept_flags;
- edev->ops->filter_config(edev->cdev, &rx_mode);
+ edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
out:
kfree(uc_macs);
}
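The conversion above splits the old multiplexed filter_config() op (which dispatched on qed_filter_params.type) into dedicated filter_config_ucast/filter_config_mcast/filter_config_rx_mode ops, so callers fill the specific parameter struct directly. A sketch of the new unicast call shape, using only the fields visible in this hunk (the opcode constant is assumed):

    static int sketch_add_ucast_mac(struct qede_dev *edev, const u8 *mac)
    {
            struct qed_filter_ucast_params ucast;

            memset(&ucast, 0, sizeof(ucast));
            ucast.type = QED_FILTER_XCAST_TYPE_ADD; /* assumed opcode name */
            ucast.mac_valid = 1;
            ether_addr_copy(ucast.mac, mac);

            return edev->ops->filter_config_ucast(edev->cdev, &ucast);
    }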
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9837bdb89cd4..06c6a5813606 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -836,7 +836,7 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
/* Set network device HW mac */
- ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+ eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
ndev->mtu = edev->dev_info.common.mtu;
}
@@ -1176,19 +1176,17 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->devlink = qed_ops->common->devlink_register(cdev);
if (IS_ERR(edev->devlink)) {
DP_NOTICE(edev, "Cannot register devlink\n");
+ rc = PTR_ERR(edev->devlink);
edev->devlink = NULL;
- /* Go on, we can live without devlink */
+ goto err3;
}
} else {
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qed_devlink *qdl;
edev = netdev_priv(ndev);
-
- if (edev->devlink) {
- struct qed_devlink *qdl = devlink_priv(edev->devlink);
-
- qdl->cdev = cdev;
- }
+ qdl = devlink_priv(edev->devlink);
+ qdl->cdev = cdev;
edev->cdev = cdev;
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
@@ -1397,7 +1395,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -2802,10 +2800,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
}
/**
- * qede_io_error_detected - called when PCI error is detected
+ * qede_io_error_detected(): Called when PCI error is detected
+ *
* @pdev: Pointer to PCI device
* @state: The current pci connection state
*
+ * Return: pci_ers_result_t.
+ *
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c00ad57575ea..1e6d72adfe43 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -508,10 +508,12 @@ static void eeprom_readword(struct ql3_adapter *qdev,
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
- __le16 *p = (__le16 *)ndev->dev_addr;
- p[0] = cpu_to_le16(addr[0]);
- p[1] = cpu_to_le16(addr[1]);
- p[2] = cpu_to_le16(addr[2]);
+ __le16 buf[ETH_ALEN / 2];
+
+ buf[0] = cpu_to_le16(addr[0]);
+ buf[1] = cpu_to_le16(addr[1]);
+ buf[2] = cpu_to_le16(addr[2]);
+ eth_hw_addr_set(ndev, (u8 *)buf);
}
static int ql_get_nvram_params(struct ql3_adapter *qdev)
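Since netdev->dev_addr is no longer writable in place, ql_set_mac_addr() now stages the byte-swapped words in a local buffer and publishes them with eth_hw_addr_set(). The same staging idea applies to any driver that used to assemble dev_addr byte by byte; a sketch (read_mac_byte() is illustrative, not a real helper):

    static void sketch_set_mac(struct net_device *ndev)
    {
            u8 addr[ETH_ALEN];
            int i;

            for (i = 0; i < ETH_ALEN; i++)
                    addr[i] = read_mac_byte(i);     /* fetch from NVRAM/regs */
            eth_hw_addr_set(ndev, addr);            /* single atomic publish */
    }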
@@ -3564,7 +3566,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
/* Program lower 32 bits of the MAC address */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 75960a29f80e..ed84f0f97623 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -304,7 +304,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
if (ret)
return ret;
- memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, mac_addr);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
@@ -356,7 +356,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
qlcnic_delete_adapter_mac(adapter);
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
qlcnic_set_multi(adapter->netdev);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 87b8c032195d..06104d2ff5b3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -420,7 +420,7 @@ static void emac_mac_dma_config(struct emac_adapter *adpt)
}
/* set MAC address */
-static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr)
+static void emac_set_mac_address(struct emac_adapter *adpt, const u8 *addr)
{
u32 sta;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9015a38eaced..a55c52696d49 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -545,13 +545,10 @@ static int emac_probe_resources(struct platform_device *pdev,
struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- char maddr[ETH_ALEN];
int ret = 0;
/* get mac address */
- if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
- ether_addr_copy(netdev->dev_addr, maddr);
- else
+ if (device_get_ethdev_address(&pdev->dev, netdev))
eth_hw_addr_random(netdev);
/* Core 0 interrupt */
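
The emac change above swaps an open-coded device_get_mac_address() plus ether_addr_copy() pair for device_get_ethdev_address(), which writes a valid fwnode-provided MAC straight into the netdev and returns 0. A sketch of the resulting probe-time fallback (hypothetical function, assuming the helper's 0-on-success convention):

#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_init_mac(struct device *dev, struct net_device *ndev)
{
	/* Firmware/DT-provided address if available, otherwise a random one */
	if (device_get_ethdev_address(dev, ndev))
		eth_hw_addr_random(ndev);
}
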
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 8427fe1b8fd1..955cce644392 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -968,7 +968,7 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
- ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr);
+ ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&spi->dev, "Using random MAC address: %pM\n",
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index ce3f7ce31adc..27c4f43176aa 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -347,7 +347,7 @@ static int qca_uart_probe(struct serdev_device *serdev)
of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
- ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr);
+ ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&serdev->dev, "Using random MAC address: %pM\n",
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 13d8eb43a485..1b2119b1d48a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -224,7 +224,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
- eth_random_addr(rmnet_dev->dev_addr);
+ eth_hw_addr_random(rmnet_dev);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
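
The rmnet hunk replaces eth_random_addr(rmnet_dev->dev_addr) with eth_hw_addr_random(rmnet_dev). Beyond avoiding the direct write, the two helpers differ in bookkeeping, sketched below (hypothetical setup function):

#include <linux/etherdevice.h>

/* eth_random_addr(buf) only fills a caller-provided buffer;
 * eth_hw_addr_random(dev) additionally stores the address through the
 * const-safe path and marks dev->addr_assign_type as NET_ADDR_RANDOM.
 */
static void example_setup(struct net_device *dev)
{
	eth_hw_addr_random(dev);	/* was: eth_random_addr(dev->dev_addr) */
}
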
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 01ef5efd7bc2..a6bf7d505178 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -453,7 +453,7 @@ static void r6040_down(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
+ const u16 *adrp;
/* Stop MAC */
iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
@@ -462,7 +462,7 @@ static void r6040_down(struct net_device *dev)
r6040_reset_mac(lp);
/* Restore MAC Address to MIDx */
- adrp = (u16 *) dev->dev_addr;
+ adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -731,13 +731,13 @@ static void r6040_mac_address(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
+ const u16 *adrp;
/* Reset MAC */
r6040_reset_mac(lp);
/* Restore MAC Address */
- adrp = (u16 *) dev->dev_addr;
+ adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -849,13 +849,13 @@ static void r6040_multicast_list(struct net_device *dev)
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
- u16 *adrp;
+ const u16 *adrp;
u16 hash_table[4] = { 0 };
spin_lock_irqsave(&lp->lock, flags);
/* Keep our MAC Address */
- adrp = (u16 *)dev->dev_addr;
+ adrp = (const u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -1031,8 +1031,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioaddr;
int err, io_size = R6040_IO_SIZE;
static int card_idx = -1;
+ u16 addr[ETH_ALEN / 2];
int bar = 0;
- u16 *adrp;
pr_info("%s\n", version);
@@ -1102,14 +1102,14 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set MAC address */
card_idx++;
- adrp = (u16 *)dev->dev_addr;
- adrp[0] = ioread16(ioaddr + MID_0L);
- adrp[1] = ioread16(ioaddr + MID_0M);
- adrp[2] = ioread16(ioaddr + MID_0H);
+ addr[0] = ioread16(ioaddr + MID_0L);
+ addr[1] = ioread16(ioaddr + MID_0M);
+ addr[2] = ioread16(ioaddr + MID_0H);
+ eth_hw_addr_set(dev, (u8 *)addr);
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
- if (!(adrp[0] || adrp[1] || adrp[2])) {
+ if (!(addr[0] || addr[1] || addr[2])) {
netdev_warn(dev, "MAC address not initialized, "
"generating random\n");
eth_hw_addr_random(dev);
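
The r6040 hunks above leave the register writeback logic intact and only const-qualify the pointer: reading dev->dev_addr is still allowed, it just has to go through a const pointer now. A sketch of the read-side pattern (hypothetical register offsets, not the r6040's):

#include <linux/io.h>
#include <linux/netdevice.h>

#define EX_MID_LO	0x00	/* hypothetical MAC-address registers */
#define EX_MID_MID	0x02
#define EX_MID_HI	0x04

static void example_restore_mac(struct net_device *dev, void __iomem *ioaddr)
{
	const u16 *adrp = (const u16 *)dev->dev_addr;

	iowrite16(adrp[0], ioaddr + EX_MID_LO);
	iowrite16(adrp[1], ioaddr + EX_MID_MID);
	iowrite16(adrp[2], ioaddr + EX_MID_HI);
}
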
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 2b84b4565e64..4f39f843bb3a 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1624,7 +1624,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&cp->lock);
@@ -1889,6 +1889,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *regs;
resource_size_t pciaddr;
unsigned int addr_len, i, pci_using_dac;
+ __le16 addr[ETH_ALEN / 2];
pr_info_once("%s", version);
@@ -1979,8 +1980,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
/* read MAC address from EEPROM */
addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((__le16 *) (dev->dev_addr))[i] =
- cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+ addr[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+ eth_hw_addr_set(dev, (u8 *)addr);
dev->netdev_ops = &cp_netdev_ops;
netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 2e6923cc653e..15b40fd93cd2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -945,6 +945,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct rtl8139_private *tp;
+ __le16 addr[ETH_ALEN / 2];
int i, addr_len, option;
void __iomem *ioaddr;
static int board_idx = -1;
@@ -994,8 +995,8 @@ static int rtl8139_init_one(struct pci_dev *pdev,
addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((__le16 *) (dev->dev_addr))[i] =
- cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
+ addr[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
+ eth_hw_addr_set(dev, (u8 *)addr);
/* The Rtl8139-specific entries in the device structure. */
dev->netdev_ops = &rtl8139_netdev_ops;
@@ -2238,7 +2239,7 @@ static int rtl8139_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&tp->lock);
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index b6c849b258a0..6cbcb3164367 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -368,6 +368,7 @@ static int __init atp_probe1(long ioaddr)
static void __init get_node_ID(struct net_device *dev)
{
long ioaddr = dev->base_addr;
+ __be16 addr[ETH_ALEN / 2];
int sa_offset = 0;
int i;
@@ -379,8 +380,9 @@ static void __init get_node_ID(struct net_device *dev)
sa_offset = 15;
for (i = 0; i < 3; i++)
- ((__be16 *)dev->dev_addr)[i] =
+ addr[i] =
cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+ eth_hw_addr_set(dev, (u8 *)addr);
write_reg(ioaddr, CMR2, CMR2_NULL);
}
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 2728df46ec41..8da4b66b71b5 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -37,7 +37,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_24,
RTL_GIGA_MAC_VER_25,
RTL_GIGA_MAC_VER_26,
- RTL_GIGA_MAC_VER_27,
+ /* support for RTL_GIGA_MAC_VER_27 has been removed */
RTL_GIGA_MAC_VER_28,
RTL_GIGA_MAC_VER_29,
RTL_GIGA_MAC_VER_30,
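
Replacing RTL_GIGA_MAC_VER_27 with a comment does renumber the later enumerators; that is safe here because, as the r8169_main.c hunks below show, the version-indexed tables use designated initializers keyed by the symbolic name, and the comment merely documents that the value once existed. A sketch of why this works (illustrative names, not the driver's):

enum example_ver { VER_26, /* VER_27 removed */ VER_28, VER_31 };

static const char *const example_names[] = {
	[VER_26] = "chip-26",
	[VER_28] = "chip-28",	/* stays attached even though its value shifted */
	[VER_31] = "chip-31",
};
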
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46a6ff9a782d..ee6c9c842012 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -118,7 +118,6 @@ static const struct {
[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" },
[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
[RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
[RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
@@ -985,33 +984,6 @@ DECLARE_RTL_COND(rtl_ocpar_cond)
return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
}
-static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
-{
- RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
- RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
- RTL_W32(tp, EPHY_RXER_NUM, 0);
-
- rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
-}
-
-static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
-{
- r8168dp_1_mdio_access(tp, reg,
- OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
-}
-
-static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
-{
- r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
-
- mdelay(1);
- RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
- RTL_W32(tp, EPHY_RXER_NUM, 0);
-
- return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
- RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
-}
-
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
@@ -1053,9 +1025,6 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- r8168dp_1_mdio_write(tp, location, val);
- break;
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
@@ -1072,8 +1041,6 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- return r8168dp_1_mdio_read(tp, location);
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
@@ -1235,7 +1202,6 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
@@ -2040,8 +2006,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168DP family. */
/* It seems this early RTL8168dp version never made it to
- * the wild. Let's see whether somebody complains, if not
- * we'll remove support for this chip version completely.
+ * the wild. Support has been removed.
* { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
*/
{ 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
@@ -2371,7 +2336,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
r8168c_hw_jumbo_disable(tp);
}
break;
- case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_28:
if (jumbo)
r8168dp_hw_jumbo_enable(tp);
else
@@ -3719,7 +3684,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
- [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
@@ -3982,7 +3946,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
goto no_reset;
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
@@ -5254,7 +5217,7 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
- u8 *mac_addr = dev->dev_addr;
+ u8 mac_addr[ETH_ALEN];
int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
@@ -5272,6 +5235,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
eth_hw_addr_random(dev);
dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
done:
+ eth_hw_addr_set(dev, mac_addr);
rtl_rar_set(tp, mac_addr);
}
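
rtl_init_mac_address() now fills a stack buffer through all of its fallbacks and commits the result once at the done: label, instead of letting each path scribble on dev->dev_addr. A condensed sketch of that shape (hypothetical helpers, returning 0 on success):

#include <linux/errno.h>
#include <linux/etherdevice.h>

static int example_mac_from_platform(u8 *mac)  { return -ENODEV; } /* stub */
static int example_mac_from_registers(u8 *mac) { return -ENODEV; } /* stub */

static void example_init_mac_address(struct net_device *dev)
{
	u8 mac_addr[ETH_ALEN];

	if (example_mac_from_platform(mac_addr) &&
	    example_mac_from_registers(mac_addr))
		eth_random_addr(mac_addr);	/* last resort */

	eth_hw_addr_set(dev, mac_addr);		/* committed exactly once */
}
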
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 50f0f621b1aa..f7ad5487879b 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -548,64 +548,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168d_apply_firmware_cond(tp, phydev, 0xb300);
}
-static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x10, 0x0008 },
- { 0x0d, 0x006c },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0xa4d8 },
- { 0x09, 0x281c },
- { 0x07, 0x2883 },
- { 0x0a, 0x6b35 },
- { 0x1d, 0x3da4 },
- { 0x1c, 0xeffd },
- { 0x14, 0x7f52 },
- { 0x18, 0x7fc6 },
- { 0x08, 0x0601 },
- { 0x06, 0x4063 },
- { 0x10, 0xf074 },
- { 0x1f, 0x0003 },
- { 0x13, 0x0789 },
- { 0x12, 0xf4bd },
- { 0x1a, 0x04fd },
- { 0x14, 0x84b0 },
- { 0x1f, 0x0000 },
- { 0x00, 0x9200 },
-
- { 0x1f, 0x0005 },
- { 0x01, 0x0340 },
- { 0x1f, 0x0001 },
- { 0x04, 0x4000 },
- { 0x03, 0x1d21 },
- { 0x02, 0x0c32 },
- { 0x01, 0x0200 },
- { 0x00, 0x5554 },
- { 0x04, 0x4800 },
- { 0x04, 0x4000 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
- { 0x1f, 0x0000 },
- };
-
- rtl_writephy_batch(phydev, phy_reg_init);
- r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000);
-}
-
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1332,7 +1274,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
[RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
[RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
[RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
[RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 47c5377e4f42..08062d73df10 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -81,6 +81,7 @@ enum ravb_reg {
RQC3 = 0x00A0,
RQC4 = 0x00A4,
RPC = 0x00B0,
+ RTC = 0x00B4, /* R-Car Gen3 and RZ/G2L only */
UFCW = 0x00BC,
UFCS = 0x00C0,
UFCV0 = 0x00C4,
@@ -187,19 +188,23 @@ enum ravb_reg {
PIR = 0x0520,
PSR = 0x0528,
PIPR = 0x052c,
+ CXR31 = 0x0530, /* RZ/G2L only */
MPR = 0x0558,
PFTCR = 0x055c,
PFRCR = 0x0560,
GECMR = 0x05b0,
MAHR = 0x05c0,
MALR = 0x05c8,
- TROCR = 0x0700, /* R-Car Gen3 only */
+ TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */
+ CXR41 = 0x0708, /* RZ/G2L only */
+ CXR42 = 0x0710, /* RZ/G2L only */
CEFCR = 0x0740,
FRECR = 0x0748,
TSFRCR = 0x0750,
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
+ CSR0 = 0x0800, /* RZ/G2L only */
};
@@ -810,10 +815,11 @@ enum ECMR_BIT {
ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 and RZ/G2L */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
+ ECMR_RCPT = 0x02000000, /* Documented for RZ/G2L only */
ECMR_TRCCM = 0x04000000,
};
@@ -823,6 +829,7 @@ enum ECSR_BIT {
ECSR_MPD = 0x00000002,
ECSR_LCHNG = 0x00000004,
ECSR_PHYI = 0x00000008,
+ ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */
};
/* ECSIPR */
@@ -857,9 +864,13 @@ enum MPR_BIT {
/* GECMR */
enum GECMR_BIT {
- GECMR_SPEED = 0x00000001,
- GECMR_SPEED_100 = 0x00000000,
- GECMR_SPEED_1000 = 0x00000001,
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+ GBETH_GECMR_SPEED = 0x00000030,
+ GBETH_GECMR_SPEED_10 = 0x00000000,
+ GBETH_GECMR_SPEED_100 = 0x00000010,
+ GBETH_GECMR_SPEED_1000 = 0x00000020,
};
/* The Ethernet AVB descriptor definitions. */
@@ -949,6 +960,16 @@ enum RAVB_QUEUE {
RAVB_NC, /* Network Control Queue */
};
+enum CXR31_BIT {
+ CXR31_SEL_LINK0 = 0x00000001,
+ CXR31_SEL_LINK1 = 0x00000008,
+};
+
+enum CSR0_BIT {
+ CSR0_TPE = 0x00000010,
+ CSR0_RPE = 0x00000020,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
@@ -956,6 +977,9 @@ enum RAVB_QUEUE {
#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
+#define GBETH_RX_BUFF_MAX 8192
+#define GBETH_RX_DESC_DATA_SIZE 4080
+
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -985,8 +1009,8 @@ struct ravb_hw_info {
void *(*alloc_rx_desc)(struct net_device *ndev, int q);
bool (*receive)(struct net_device *ndev, int *quota, int q);
void (*set_rate)(struct net_device *ndev);
- int (*set_rx_csum_feature)(struct net_device *ndev, netdev_features_t features);
- void (*dmac_init)(struct net_device *ndev);
+ int (*set_feature)(struct net_device *ndev, netdev_features_t features);
+ int (*dmac_init)(struct net_device *ndev);
void (*emac_init)(struct net_device *ndev);
const char (*gstrings_stats)[ETH_GSTRING_LEN];
size_t gstrings_size;
@@ -994,14 +1018,20 @@ struct ravb_hw_info {
netdev_features_t net_features;
int stats_len;
size_t max_rx_len;
+ u32 tccr_mask;
+ u32 rx_max_buf_size;
unsigned aligned_tx: 1;
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
unsigned tx_counters:1; /* E-MAC has TX counters */
+ unsigned carrier_counters:1; /* E-MAC has carrier counters */
unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */
- unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */
- unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned gptp:1; /* AVB-DMAC has gPTP support */
+ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */
+ unsigned magic_pkt:1; /* E-MAC supports magic packet detection */
+ unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
struct ravb_private {
@@ -1018,9 +1048,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_rx_desc *gbeth_rx_ring;
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
+ struct sk_buff *rx_1st_skb;
struct sk_buff **rx_skb[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
@@ -1056,6 +1088,8 @@ struct ravb_private {
unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */
unsigned int num_tx_desc; /* TX descriptors per packet */
+ int duplex;
+
const struct ravb_hw_info *info;
struct reset_control *rstc;
};
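
The ravb_hw_info additions above extend the driver's per-SoC dispatch: each variant publishes its hooks and capability bits in one structure, and common code calls through it rather than branching on chip type at every site. A stripped-down sketch of the pattern (illustrative names, not the driver's):

#include <linux/netdevice.h>

struct example_hw_info {
	int (*dmac_init)(struct net_device *ndev);
	u32 tccr_mask;			/* TX start-request bits to poll */
	unsigned int nc_queues:1;	/* has the network-control queues */
};

struct example_priv {
	const struct example_hw_info *info;
};

static int example_dmac_init(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);

	return priv->info->dmac_init(ndev);	/* variant-specific body */
}
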
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0f85f2d97b18..b4c597f4040c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -83,7 +83,24 @@ static int ravb_config(struct net_device *ndev)
return error;
}
-static void ravb_set_rate(struct net_device *ndev)
+static void ravb_set_rate_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 10: /* 10BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
+ break;
+ case 100: /* 100BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
+ break;
+ }
+}
+
+static void ravb_set_rate_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -115,17 +132,19 @@ static void ravb_read_mac_address(struct device_node *np,
{
int ret;
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
u32 mahr = ravb_read(ndev, MAHR);
u32 malr = ravb_read(ndev, MALR);
+ u8 addr[ETH_ALEN];
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
@@ -217,7 +236,32 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free(struct net_device *ndev, int q)
+static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+ unsigned int i;
+
+ if (!priv->gbeth_rx_ring)
+ return;
+
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ }
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+ priv->rx_desc_dma[q]);
+ priv->gbeth_rx_ring = NULL;
+}
+
+static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
@@ -283,7 +327,38 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format(struct net_device *ndev, int q)
+static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_rx_desc *rx_desc;
+ unsigned int rx_ring_size;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ /* RX descriptor */
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+}
+
+static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_ex_rx_desc *rx_desc;
@@ -356,7 +431,20 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+
+ priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ return priv->gbeth_rx_ring;
+}
+
+static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
@@ -426,7 +514,37 @@ error:
return -ENOMEM;
}
-static void ravb_rcar_emac_init(struct net_device *ndev)
+static void ravb_emac_init_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+
+ /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
+ ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
+ ECMR_TE | ECMR_RE | ECMR_RCPT |
+ ECMR_TXF | ECMR_RXF, ECMR);
+
+ ravb_set_rate_gbeth(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
+}
+
+static void ravb_emac_init_rcar(struct net_device *ndev)
{
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
@@ -436,7 +554,7 @@ static void ravb_rcar_emac_init(struct net_device *ndev)
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
- ravb_set_rate(ndev);
+ ravb_set_rate_rcar(ndev);
/* Set MAC address */
ravb_write(ndev,
@@ -461,10 +579,58 @@ static void ravb_emac_init(struct net_device *ndev)
info->emac_init(ndev);
}
-static void ravb_rcar_dmac_init(struct net_device *ndev)
+static int ravb_dmac_init_gbeth(struct net_device *ndev)
+{
+ int error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+
+ /* Set DMAC RX */
+ ravb_write(ndev, 0x60000000, RCR);
+
+ /* Set Max Frame Length (RTC) */
+ ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+
+ /* Set FIFO size */
+ ravb_write(ndev, 0x00222200, TGC);
+
+ ravb_write(ndev, 0, TCCR);
+
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0, RIC0);
+ /* Disable FIFO full warning */
+ ravb_write(ndev, 0x0, RIC1);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
+
+ ravb_write(ndev, TIC_FTE0, TIC);
+
+ return 0;
+}
+
+static int ravb_dmac_init_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+ error = ravb_ring_init(ndev, RAVB_NC);
+ if (error) {
+ ravb_ring_free(ndev, RAVB_BE);
+ return error;
+ }
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+ ravb_ring_format(ndev, RAVB_NC);
/* Set AVB RX */
ravb_write(ndev,
@@ -491,6 +657,8 @@ static void ravb_rcar_dmac_init(struct net_device *ndev)
ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
/* Frame transmitted, timestamp FIFO updated */
ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+ return 0;
}
/* Device init function for Ethernet AVB */
@@ -505,20 +673,9 @@ static int ravb_dmac_init(struct net_device *ndev)
if (error)
return error;
- error = ravb_ring_init(ndev, RAVB_BE);
+ error = info->dmac_init(ndev);
if (error)
return error;
- error = ravb_ring_init(ndev, RAVB_NC);
- if (error) {
- ravb_ring_free(ndev, RAVB_BE);
- return error;
- }
-
- /* Descriptor format */
- ravb_ring_format(ndev, RAVB_BE);
- ravb_ring_format(ndev, RAVB_NC);
-
- info->dmac_init(ndev);
/* Setting the control will start the AVB-DMAC process. */
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
@@ -579,7 +736,151 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q)
+static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
+ struct ravb_rx_desc *desc)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+
+ skb = priv->rx_skb[RAVB_BE][entry];
+ priv->rx_skb[RAVB_BE][entry] = NULL;
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+
+ return skb;
+}
+
+/* Packet receive function for Gigabit Ethernet */
+static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct net_device_stats *stats;
+ struct ravb_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u8 desc_status;
+ int boguscnt;
+ u16 pkt_len;
+ u8 die_dt;
+ int entry;
+ int limit;
+
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ stats = &priv->stats[q];
+
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->gbeth_rx_ring[entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ die_dt = desc->die_dt & 0xF0;
+ switch (die_dt) {
+ case DT_FSINGLE:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ break;
+ case DT_FSTART:
+ priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ break;
+ case DT_FMID:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ break;
+ case DT_FEND:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ priv->rx_1st_skb->protocol =
+ eth_type_trans(priv->rx_1st_skb, ndev);
+ napi_gro_receive(&priv->napi[q],
+ priv->rx_1st_skb);
+ stats->rx_packets++;
+ stats->rx_bytes += priv->rx_1st_skb->len;
+ break;
+ }
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ if (!skb)
+ break;
+ ravb_set_buffer_align(skb);
+ dma_addr = dma_map_single(ndev->dev.parent,
+ skb->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -717,11 +1018,13 @@ static void ravb_rcv_snd_enable(struct net_device *ndev)
/* Function that waits for the DMA process to finish */
static int ravb_stop_dma(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
/* Wait for stopping the hardware TX process */
- error = ravb_wait(ndev, TCCR,
- TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
+
if (error)
return error;
@@ -859,6 +1162,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
irqreturn_t result = IRQ_NONE;
u32 iss;
@@ -875,8 +1179,13 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
/* Network control and best effort queue RX/TX */
- for (q = RAVB_NC; q >= RAVB_BE; q--) {
- if (ravb_queue_interrupt(ndev, q))
+ if (info->nc_queues) {
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (ravb_queue_interrupt(ndev, q))
+ result = IRQ_HANDLED;
+ }
+ } else {
+ if (ravb_queue_interrupt(ndev, RAVB_BE))
result = IRQ_HANDLED;
}
}
@@ -966,16 +1275,25 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ bool gptp = info->gptp || info->ccc_gac;
+ struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
+ unsigned int entry;
+ if (!gptp) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
+ if (gptp || desc->die_dt != DT_FEMPTY) {
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
+ }
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1000,7 +1318,8 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
@@ -1009,6 +1328,13 @@ out:
return budget - quota;
}
+static void ravb_set_duplex_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
+}
+
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
@@ -1025,6 +1351,12 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
+ if (info->half_duplex && phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex_gbeth(ndev);
+ }
+
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
@@ -1039,6 +1371,8 @@ static void ravb_adjust_link(struct net_device *ndev)
new_state = true;
priv->link = 0;
priv->speed = 0;
+ if (info->half_duplex)
+ priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1061,6 +1395,7 @@ static int ravb_phy_init(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct phy_device *phydev;
struct device_node *pn;
phy_interface_t iface;
@@ -1068,6 +1403,7 @@ static int ravb_phy_init(struct net_device *ndev)
priv->link = 0;
priv->speed = 0;
+ priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1106,15 +1442,17 @@ static int ravb_phy_init(struct net_device *ndev)
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
- /* 10BASE, Pause and Asym Pause is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ if (!info->half_duplex) {
+ /* 10BASE, Pause and Asym Pause is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
- /* Half Duplex is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ /* Half Duplex is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
phy_attached_info(phydev);
@@ -1157,6 +1495,24 @@ static void ravb_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_csum_offload_errors",
+ "rx_queue_0_over_errors",
+};
+
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_0_current",
"tx_queue_0_current",
@@ -1208,11 +1564,14 @@ static void ravb_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ int num_rx_q;
int i = 0;
int q;
+ num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
/* Device-specific stats */
- for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ for (q = RAVB_BE; q < num_rx_q; q++) {
struct net_device_stats *stats = &priv->stats[q];
data[i++] = priv->cur_rx[q];
@@ -1274,7 +1633,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
if (netif_running(ndev)) {
netif_device_detach(ndev);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
error = ravb_stop_dma(ndev);
@@ -1287,7 +1646,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
}
/* Set new parameters */
@@ -1306,7 +1666,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_device_attach(ndev);
@@ -1319,6 +1679,7 @@ static int ravb_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *hw_info = priv->info;
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -1332,7 +1693,8 @@ static int ravb_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = ptp_clock_index(priv->ptp.clock);
+ if (hw_info->gptp || hw_info->ccc_gac)
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
return 0;
}
@@ -1348,8 +1710,9 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
return -EOPNOTSUPP;
priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -1403,7 +1766,8 @@ static int ravb_open(struct net_device *ndev)
int error;
napi_enable(&priv->napi[RAVB_BE]);
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
if (!info->multi_irqs) {
error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
@@ -1446,7 +1810,7 @@ static int ravb_open(struct net_device *ndev)
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1460,7 +1824,7 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
out_free_irq_nc_tx:
if (!info->multi_irqs)
@@ -1477,7 +1841,8 @@ out_free_irq_emac:
out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
return error;
}
@@ -1508,7 +1873,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
@@ -1526,7 +1891,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
}
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
/* Device init */
error = ravb_dmac_init(ndev);
@@ -1543,7 +1909,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
out:
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1553,6 +1919,7 @@ out:
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
@@ -1629,28 +1996,30 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
- if (q == RAVB_NC) {
- ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
- if (!ts_skb) {
- if (num_tx_desc > 1) {
- desc--;
- dma_unmap_single(ndev->dev.parent, dma_addr,
- len, DMA_TO_DEVICE);
+ if (info->gptp || info->ccc_gac) {
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ if (num_tx_desc > 1) {
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ len, DMA_TO_DEVICE);
+ }
+ goto unmap;
}
- goto unmap;
+ ts_skb->skb = skb_get(skb);
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
- ts_skb->skb = skb_get(skb);
- ts_skb->tag = priv->ts_skb_tag++;
- priv->ts_skb_tag &= 0x3ff;
- list_add_tail(&ts_skb->list, &priv->ts_skb_list);
- /* TAG and timestamp required flag */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+ skb_tx_timestamp(skb);
}
-
- skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
if (num_tx_desc > 1) {
@@ -1698,28 +2067,45 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats = &ndev->stats;
stats0 = &priv->stats[RAVB_BE];
- stats1 = &priv->stats[RAVB_NC];
if (info->tx_counters) {
nstats->tx_dropped += ravb_read(ndev, TROCR);
ravb_write(ndev, 0, TROCR); /* (write clear) */
}
- nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
- nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
- nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
- nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
- nstats->multicast = stats0->multicast + stats1->multicast;
- nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
- nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
- nstats->rx_frame_errors =
- stats0->rx_frame_errors + stats1->rx_frame_errors;
- nstats->rx_length_errors =
- stats0->rx_length_errors + stats1->rx_length_errors;
- nstats->rx_missed_errors =
- stats0->rx_missed_errors + stats1->rx_missed_errors;
- nstats->rx_over_errors =
- stats0->rx_over_errors + stats1->rx_over_errors;
+ if (info->carrier_counters) {
+ nstats->collisions += ravb_read(ndev, CXR41);
+ ravb_write(ndev, 0, CXR41); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
+ ravb_write(ndev, 0, CXR42); /* (write clear) */
+ }
+
+ nstats->rx_packets = stats0->rx_packets;
+ nstats->tx_packets = stats0->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes;
+ nstats->multicast = stats0->multicast;
+ nstats->rx_errors = stats0->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors;
+ nstats->rx_frame_errors = stats0->rx_frame_errors;
+ nstats->rx_length_errors = stats0->rx_length_errors;
+ nstats->rx_missed_errors = stats0->rx_missed_errors;
+ nstats->rx_over_errors = stats0->rx_over_errors;
+ if (info->nc_queues) {
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->rx_packets += stats1->rx_packets;
+ nstats->tx_packets += stats1->tx_packets;
+ nstats->rx_bytes += stats1->rx_bytes;
+ nstats->tx_bytes += stats1->tx_bytes;
+ nstats->multicast += stats1->multicast;
+ nstats->rx_errors += stats1->rx_errors;
+ nstats->rx_crc_errors += stats1->rx_crc_errors;
+ nstats->rx_frame_errors += stats1->rx_frame_errors;
+ nstats->rx_length_errors += stats1->rx_length_errors;
+ nstats->rx_missed_errors += stats1->rx_missed_errors;
+ nstats->rx_over_errors += stats1->rx_over_errors;
+ }
return nstats;
}
@@ -1752,7 +2138,7 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, TIC);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -1761,10 +2147,12 @@ static int ravb_close(struct net_device *ndev)
"device will be stopped after h/w processes are done.\n");
/* Clear the timestamp list */
- list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
- list_del(&ts_skb->list);
- kfree_skb(ts_skb->skb);
- kfree(ts_skb);
+ if (info->gptp || info->ccc_gac) {
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree_skb(ts_skb->skb);
+ kfree(ts_skb);
+ }
}
/* PHY disconnect */
@@ -1784,12 +2172,14 @@ static int ravb_close(struct net_device *ndev)
}
free_irq(ndev->irq, ndev);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
return 0;
}
@@ -1918,8 +2308,15 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int ravb_set_features_rx_csum(struct net_device *ndev,
- netdev_features_t features)
+static int ravb_set_features_gbeth(struct net_device *ndev,
+ netdev_features_t features)
+{
+ /* Placeholder */
+ return 0;
+}
+
+static int ravb_set_features_rcar(struct net_device *ndev,
+ netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
@@ -1937,7 +2334,7 @@ static int ravb_set_features(struct net_device *ndev,
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->set_rx_csum_feature(ndev, features);
+ return info->set_feature(ndev, features);
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2001,43 +2398,72 @@ static int ravb_mdio_release(struct ravb_private *priv)
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free,
- .rx_ring_format = ravb_rx_ring_format,
- .alloc_rx_desc = ravb_alloc_rx_desc,
- .receive = ravb_rcar_rx,
- .set_rate = ravb_set_rate,
- .set_rx_csum_feature = ravb_set_features_rx_csum,
- .dmac_init = ravb_rcar_dmac_init,
- .emac_init = ravb_rcar_emac_init,
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
- .ptp_cfg_active = 1,
+ .ccc_gac = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free,
- .rx_ring_format = ravb_rx_ring_format,
- .alloc_rx_desc = ravb_alloc_rx_desc,
- .receive = ravb_rcar_rx,
- .set_rate = ravb_set_rate,
- .set_rx_csum_feature = ravb_set_features_rx_csum,
- .dmac_init = ravb_rcar_dmac_init,
- .emac_init = ravb_rcar_emac_init,
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
.aligned_tx = 1,
- .no_ptp_cfg_active = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
+static const struct ravb_hw_info gbeth_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_gbeth,
+ .rx_ring_format = ravb_rx_ring_format_gbeth,
+ .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
+ .receive = ravb_rx_gbeth,
+ .set_rate = ravb_set_rate_gbeth,
+ .set_feature = ravb_set_features_gbeth,
+ .dmac_init = ravb_dmac_init_gbeth,
+ .emac_init = ravb_emac_init_gbeth,
+ .gstrings_stats = ravb_gstrings_stats_gbeth,
+ .gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
+ .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
+ .tccr_mask = TCCR_TSRQ0,
+ .rx_max_buf_size = SZ_8K,
+ .aligned_tx = 1,
+ .tx_counters = 1,
+ .carrier_counters = 1,
+ .half_duplex = 1,
};
static const struct of_device_id ravb_match_table[] = {
@@ -2046,6 +2472,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
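
Each compatible string above carries its ravb_hw_info in .data, so probe only has to fetch the matched entry. A minimal sketch of that retrieval (hypothetical probe body, assuming of_device_get_match_data()):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_hw_info;		/* per-variant ops/caps, as sketched earlier */

static int example_probe(struct platform_device *pdev)
{
	const struct example_hw_info *info;

	info = of_device_get_match_data(&pdev->dev);
	if (!info)
		return -EINVAL;

	/* stash info in priv and dispatch through it from here on */
	return 0;
}
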
@@ -2080,13 +2507,15 @@ static void ravb_set_config_mode(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- if (info->no_ptp_cfg_active) {
+ if (info->gptp) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else {
+ } else if (info->ccc_gac) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
}
}
@@ -2192,8 +2621,11 @@ static int ravb_probe(struct platform_device *pdev)
priv->pdev = pdev;
priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
- priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
- priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ if (info->nc_queues) {
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ }
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
@@ -2252,7 +2684,7 @@ static int ravb_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->refclk);
- ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
@@ -2269,13 +2701,15 @@ static int ravb_probe(struct platform_device *pdev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_refclk;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ error = ravb_set_gti(ndev);
+ if (error)
+ goto out_disable_refclk;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
if (info->internal_delay) {
ravb_parse_delay_mode(np, ndev);
@@ -2301,7 +2735,7 @@ static int ravb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&priv->ts_skb_list);
/* Initialise PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_init(ndev, pdev);
/* Debug message level */
@@ -2323,7 +2757,8 @@ static int ravb_probe(struct platform_device *pdev)
}
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
- netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+ if (info->nc_queues)
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
/* Network device register */
error = register_netdev(ndev);
@@ -2341,7 +2776,9 @@ static int ravb_probe(struct platform_device *pdev)
return 0;
out_napi_del:
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
+
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
out_dma_free:
@@ -2349,7 +2786,7 @@ out_dma_free:
priv->desc_bat_dma);
/* Stop PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
@@ -2369,7 +2806,7 @@ static int ravb_remove(struct platform_device *pdev)
const struct ravb_hw_info *info = priv->info;
/* Stop PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
clk_disable_unprepare(priv->refclk);
@@ -2380,7 +2817,8 @@ static int ravb_remove(struct platform_device *pdev)
ravb_write(ndev, CCC_OPC_RESET, CCC);
pm_runtime_put_sync(&pdev->dev);
unregister_netdev(ndev);
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
pm_runtime_disable(&pdev->dev);
@@ -2394,6 +2832,7 @@ static int ravb_remove(struct platform_device *pdev)
static int ravb_wol_setup(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
/* Disable interrupts by clearing the interrupt masks. */
ravb_write(ndev, 0, RIC0);
@@ -2402,7 +2841,8 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Only allow ECI interrupts */
synchronize_irq(priv->emac_irq);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
@@ -2415,9 +2855,11 @@ static int ravb_wol_setup(struct net_device *ndev)
static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int ret;
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
napi_enable(&priv->napi[RAVB_BE]);
/* Disable MagicPacket */
@@ -2468,13 +2910,15 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
- if (ret)
- return ret;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ ret = ravb_set_gti(ndev);
+ if (ret)
+ return ret;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
if (info->internal_delay)
ravb_set_delay_mode(ndev);
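The ravb hunks above all apply one idea: per-SoC capabilities live in struct ravb_hw_info, and optional blocks (gPTP setup, the network-control queue pair) run only when the matching flag is set. A minimal sketch of the gate; the struct here is illustrative and only mirrors the flag names used above:

/* Sketch only: the real struct ravb_hw_info carries more fields. */
struct hw_info {
	unsigned int gptp:1;		/* gPTP usable in normal mode */
	unsigned int ccc_gac:1;		/* gPTP usable via CCC.GAC */
	unsigned int nc_queues:1;	/* hardware has the NC queue pair */
};

static bool needs_gti_setup(const struct hw_info *info)
{
	/* mirrors the probe/resume gate above: only gPTP-capable
	 * parts program GTI and request its loading via GCCR.LTI */
	return info->gptp || info->ccc_gac;
}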
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1374faa229a2..a3fbb2221c9a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1153,17 +1153,19 @@ static void update_mac_address(struct net_device *ndev)
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
- memcpy(ndev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(ndev, mac);
} else {
u32 mahr = sh_eth_read(ndev, MAHR);
u32 malr = sh_eth_read(ndev, MALR);
-
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ u8 addr[ETH_ALEN];
+
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
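This sh_eth hunk is the template for most conversions that follow: ahead of the core making netdev->dev_addr const, drivers stop writing the bytes in place and instead assemble the address in a local buffer, committing it in one call with eth_hw_addr_set(). A hedged sketch of the same two-register layout:

#include <linux/etherdevice.h>

/* build the address locally, then commit it in one step */
static void set_addr_from_regs(struct net_device *ndev, u32 mahr, u32 malr)
{
	u8 addr[ETH_ALEN];

	addr[0] = (mahr >> 24) & 0xFF;
	addr[1] = (mahr >> 16) & 0xFF;
	addr[2] = (mahr >> 8) & 0xFF;
	addr[3] = (mahr >> 0) & 0xFF;
	addr[4] = (malr >> 8) & 0xFF;
	addr[5] = (malr >> 0) & 0xFF;
	eth_hw_addr_set(ndev, addr);	/* the only sanctioned writer of dev_addr */
}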
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 3364b6a56bd1..ba4062881eed 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1954,7 +1954,7 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -2545,11 +2545,13 @@ static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
const struct rocker *rocker = rocker_port->rocker;
const struct pci_dev *pdev = rocker->pdev;
+ u8 addr[ETH_ALEN];
int err;
- err = rocker_cmd_get_port_settings_macaddr(rocker_port,
- rocker_port->dev->dev_addr);
- if (err) {
+ err = rocker_cmd_get_port_settings_macaddr(rocker_port, addr);
+ if (!err) {
+ eth_hw_addr_set(rocker_port->dev, addr);
+ } else {
dev_warn(&pdev->dev, "failed to get mac address, using random\n");
eth_hw_addr_random(rocker_port->dev);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 049dc6cf4611..0f45107db8dd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -329,7 +329,7 @@ struct sxgbe_core_ops {
/* Set power management mode (e.g. magic frame) */
void (*pmt)(void __iomem *ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+ void (*set_umac_addr)(void __iomem *ioaddr, const unsigned char *addr,
unsigned int reg_n);
void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index e96e2bd295ef..7d9f257de92a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -85,7 +85,8 @@ static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
}
/* Set/Get Unicast MAC addresses */
-static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void sxgbe_core_set_umac_addr(void __iomem *ioaddr,
+ const unsigned char *addr,
unsigned int reg_n)
{
u32 high_word, low_word;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 6781aa636d58..32161a56726c 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -931,10 +931,13 @@ static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ u8 addr[ETH_ALEN];
+
priv->hw->mac->get_umac_addr((void __iomem *)
- priv->ioaddr,
- priv->dev->dev_addr, 0);
- if (!is_valid_ether_addr(priv->dev->dev_addr))
+ priv->ioaddr, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
eth_hw_addr_random(priv->dev);
}
dev_info(priv->device, "device MAC address %pM\n",
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 4639ed9438a3..926532466691 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -118,7 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
/* Get MAC address if available (DT) */
- of_get_mac_address(node, priv->dev->dev_addr);
+ of_get_ethdev_address(node, priv->dev);
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 37ff25a84030..96065dfc747b 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -167,7 +167,7 @@ static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
struct sgiseeq_private *sp = netdev_priv(dev);
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sa->sa_data);
spin_lock_irq(&sp->tx_lock);
__sgiseeq_set_mac_address(dev);
@@ -764,7 +764,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
- memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac);
#ifdef DEBUG
gpriv = sp;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e7e2223aebbf..cf366ed2557c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1038,7 +1038,7 @@ int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
}
int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
+ unsigned int port_id, const u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
@@ -1050,7 +1050,7 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx,
}
int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
+ unsigned int port_id, const u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 518268ce2064..6aa81229b68a 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1250,7 +1250,7 @@ int ef100_probe_pf(struct efx_nic *efx)
if (rc)
goto fail;
/* Assign MAC address */
- memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN);
+ eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
return 0;
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 752d6406f07e..7f5aa4a8c451 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -480,7 +480,7 @@ static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
return rc;
}
-int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf;
@@ -523,7 +523,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
goto fail;
if (vf->efx)
- ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+ eth_hw_addr_set(vf->efx->net_dev, mac);
}
ether_addr_copy(vf->mac, mac);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index cfe556d17313..3c703ca878b0 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -39,7 +39,7 @@ static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
void efx_ef10_sriov_fini(struct efx_nic *efx);
static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
-int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos);
@@ -60,9 +60,9 @@ int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac);
+ unsigned int port_id, const u8 *mac);
int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac);
+ unsigned int port_id, const u8 *mac);
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
u32 *port_flags, u32 *vadaptor_flags,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 43ef4f529028..6960a2fe2b53 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -136,7 +136,7 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 896b59253197..f187631b2c5c 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -181,11 +181,11 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* save old address */
ether_addr_copy(old_addr, net_dev->dev_addr);
- ether_addr_copy(net_dev->dev_addr, new_addr);
+ eth_hw_addr_set(net_dev, new_addr);
if (efx->type->set_mac_address) {
rc = efx->type->set_mac_address(efx);
if (rc) {
- ether_addr_copy(net_dev->dev_addr, old_addr);
+ eth_hw_addr_set(net_dev, old_addr);
return rc;
}
}
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bf1443539a1a..bd552c7dffcb 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -563,20 +563,14 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
- u32 supported;
mutex_lock(&efx->mac_lock);
efx_mcdi_phy_get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
-
- supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
if (LOOPBACK_INTERNAL(efx)) {
cmd->base.speed = link_state->speed;
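The ethtool change above also drops a lossy round-trip: the legacy u32 SUPPORTED_* mask only covers the first 32 link-mode bits, so converting the bitmap down and back could silently discard newer modes. With link modes kept as bitmaps, adding one mode is a direct bit operation, as in this sketch:

#include <linux/ethtool.h>

static void advertise_pause(struct ethtool_link_ksettings *cmd)
{
	/* sets ETHTOOL_LINK_MODE_Pause_BIT (and the Asym variant)
	 * in cmd->link_modes.supported, no u32 truncation involved */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
}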
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 423bdf81200f..c68837a951f4 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1044,7 +1044,7 @@ static int ef4_probe_port(struct ef4_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
return 0;
}
@@ -2162,11 +2162,11 @@ static int ef4_set_mac_address(struct net_device *net_dev, void *data)
/* save old address */
ether_addr_copy(old_addr, net_dev->dev_addr);
- ether_addr_copy(net_dev->dev_addr, new_addr);
+ eth_hw_addr_set(net_dev, new_addr);
if (efx->type->set_mac_address) {
rc = efx->type->set_mac_address(efx);
if (rc) {
- ether_addr_copy(net_dev->dev_addr, old_addr);
+ eth_hw_addr_set(net_dev, old_addr);
return rc;
}
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f6981810039d..cc15ee8812d9 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1440,7 +1440,7 @@ struct efx_nic_type {
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
- int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac);
int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
u8 qos);
int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 441e7f3e5375..f12851a527d9 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1591,7 +1591,7 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
{
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
index e441c89c25ce..e548c4daf189 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.h
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -46,7 +46,7 @@ bool efx_siena_sriov_wanted(struct efx_nic *efx);
void efx_siena_sriov_reset(struct efx_nic *efx);
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
u16 vlan, u8 qos);
int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 062f7844c496..e2d009866a7b 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -243,7 +243,7 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr)
struct ioc3_private *ip = netdev_priv(dev);
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sa->sa_data);
spin_lock_irq(&ip->ioc3_lock);
__ioc3_set_mac_address(dev);
@@ -920,7 +920,7 @@ static int ioc3eth_probe(struct platform_device *pdev)
ioc3_mii_start(ip);
ioc3_ssram_disc(ip);
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, mac_addr);
/* The IOC3-specific entries in the device structure. */
dev->watchdog_timeo = 5 * HZ;
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index efce834d8ee6..6d850ea2b94c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -836,7 +836,7 @@ static int meth_probe(struct platform_device *pdev)
dev->watchdog_timeo = timeout;
dev->irq = MACE_ETHERNET_IRQ;
dev->base_addr = (unsigned long)&mace->eth;
- memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, o2meth_eaddr);
priv = netdev_priv(dev);
priv->pdev = pdev;
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 1fd08a04bd4e..ff4197f5e46d 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1400,6 +1400,7 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem* port_base;
struct net_device *dev;
struct sc92031_priv *priv;
+ u8 addr[ETH_ALEN];
u32 mac0, mac1;
err = pci_enable_device(pdev);
@@ -1458,12 +1459,13 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mac0 = ioread32(port_base + MAC0);
mac1 = ioread32(port_base + MAC0 + 4);
- dev->dev_addr[0] = mac0 >> 24;
- dev->dev_addr[1] = mac0 >> 16;
- dev->dev_addr[2] = mac0 >> 8;
- dev->dev_addr[3] = mac0;
- dev->dev_addr[4] = mac1 >> 8;
- dev->dev_addr[5] = mac1;
+ addr[0] = mac0 >> 24;
+ addr[1] = mac0 >> 16;
+ addr[2] = mac0 >> 8;
+ addr[3] = mac0;
+ addr[4] = mac1 >> 8;
+ addr[5] = mac1;
+ eth_hw_addr_set(dev, addr);
err = register_netdev(dev);
if (err < 0)
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 3d1a18a01ce5..216bb2d34d7c 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1070,7 +1070,7 @@ static int sis190_open(struct net_device *dev)
/*
* Rx and Tx descriptors need 256 bytes alignment.
- * pci_alloc_consistent() guarantees a stronger alignment.
+ * dma_alloc_coherent() guarantees a stronger alignment.
*/
tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES,
&tp->tx_dma, GFP_KERNEL);
@@ -1586,6 +1586,7 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
{
struct sis190_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ __le16 addr[ETH_ALEN / 2];
u16 sig;
int i;
@@ -1606,8 +1607,9 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
for (i = 0; i < ETH_ALEN / 2; i++) {
u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
+ addr[i] = cpu_to_le16(w);
}
+ eth_hw_addr_set(dev, (u8 *)addr);
sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
@@ -1629,6 +1631,7 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
struct sis190_private *tp = netdev_priv(dev);
struct pci_dev *isa_bridge;
+ u8 addr[ETH_ALEN];
u8 reg, tmp8;
unsigned int i;
@@ -1657,8 +1660,9 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
for (i = 0; i < ETH_ALEN; i++) {
outb(0x9 + i, 0x78);
- dev->dev_addr[i] = inb(0x79);
+ addr[i] = inb(0x79);
}
+ eth_hw_addr_set(dev, addr);
outb(0x12, 0x78);
reg = inb(0x79);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 60a0c0e9ded2..cc2d907c4c4b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -258,6 +258,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
u16 signature;
int i;
@@ -271,7 +272,8 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
/* get MAC address from EEPROM */
for (i = 0; i < 3; i++)
- ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ addr[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ eth_hw_addr_set(net_dev, (u8 *)addr);
return 1;
}
@@ -290,6 +292,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
struct net_device *net_dev)
{
struct pci_dev *isa_bridge = NULL;
+ u8 addr[ETH_ALEN];
u8 reg;
int i;
@@ -306,8 +309,9 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 6; i++) {
outb(0x09 + i, 0x70);
- ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+ addr[i] = inb(0x71);
}
+ eth_hw_addr_set(net_dev, addr);
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -331,6 +335,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
u32 rfcrSave;
u32 i;
@@ -345,8 +350,9 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
/* load MAC addr to filter data register */
for (i = 0 ; i < 3 ; i++) {
sw32(rfcr, (i << RFADDR_shift));
- *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
+ addr[i] = sr16(rfdr);
}
+ eth_hw_addr_set(net_dev, (u8 *)addr);
/* enable packet filtering */
sw32(rfcr, rfcrSave | RFEN);
@@ -375,17 +381,18 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
int wait, rc = 0;
sw32(mear, EEREQ);
for (wait = 0; wait < 2000; wait++) {
if (sr32(mear) & EEGNT) {
- u16 *mac = (u16 *)net_dev->dev_addr;
int i;
/* get MAC address from EEPROM */
for (i = 0; i < 3; i++)
- mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
+ addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
+ eth_hw_addr_set(net_dev, (u8 *)addr);
rc = 1;
break;
@@ -1098,7 +1105,7 @@ sis900_init_rxfilter (struct net_device * net_dev)
/* load MAC addr to filter data register */
for (i = 0 ; i < 3 ; i++) {
- u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
+ u32 w = (u32) *((const u16 *)(net_dev->dev_addr)+i);
sw32(rfcr, i << RFADDR_shift);
sw32(rfdr, w);
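When the EEPROM hands back 16-bit words, the committed byte order must not depend on the host: sis190 above stores each word as __le16 via cpu_to_le16() before handing the buffer to eth_hw_addr_set() as bytes. A minimal sketch, taking the already-read words as input:

#include <linux/etherdevice.h>

/* 'words' are the three 16-bit EEPROM words in host order */
static void set_addr_from_words(struct net_device *dev, const u16 *words)
{
	__le16 addr[ETH_ALEN / 2];
	int i;

	for (i = 0; i < ETH_ALEN / 2; i++)
		addr[i] = cpu_to_le16(words[i]);	/* fix byte order once */
	eth_hw_addr_set(dev, (u8 *)addr);	/* same bytes on BE and LE hosts */
}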
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 44daf79a8f97..a0654e88444c 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -325,6 +325,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct epic_private *ep;
int i, ret, option = 0, duplex = 0;
+ __le16 addr[ETH_ALEN / 2];
void *ring_space;
dma_addr_t ring_dma;
@@ -416,7 +417,8 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Note: the '175 does not have a serial EEPROM. */
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
+ addr[i] = cpu_to_le16(er16(LAN0 + i*4));
+ eth_hw_addr_set(dev, (u8 *)addr);
if (debug > 2) {
dev_dbg(&pdev->dev, "EEPROM contents:\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index b008b4e8a2a5..89381f796985 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1788,6 +1788,7 @@ static int smc911x_probe(struct net_device *dev)
struct dma_slave_config config;
dma_cap_mask_t mask;
#endif
+ u8 addr[ETH_ALEN];
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
@@ -1892,7 +1893,8 @@ static int smc911x_probe(struct net_device *dev)
spin_lock_init(&lp->lock);
/* Get the MAC address */
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc911x_reset(dev);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 42fc37c7887a..37c822e27207 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -347,6 +347,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
static int cvt_ascii_address(struct net_device *dev, char *s)
{
+ u8 mac[ETH_ALEN];
int i, j, da, c;
if (strlen(s) != 12)
@@ -359,8 +360,9 @@ static int cvt_ascii_address(struct net_device *dev, char *s)
da += ((c >= '0') && (c <= '9')) ?
(c - '0') : ((c & 0x0f) + 9);
}
- dev->dev_addr[i] = da;
+ mac[i] = da;
}
+ eth_hw_addr_set(dev, mac);
return 0;
}
@@ -539,6 +541,7 @@ static int mot_setup(struct pcmcia_device *link)
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
int i, wait, loop;
+ u8 mac[ETH_ALEN];
u_int addr;
/* Read Ethernet address from Serial EEPROM */
@@ -559,9 +562,10 @@ static int mot_setup(struct pcmcia_device *link)
return -1;
addr = inw(ioaddr + GENERAL);
- dev->dev_addr[2*i] = addr & 0xff;
- dev->dev_addr[2*i+1] = (addr >> 8) & 0xff;
+ mac[2*i] = addr & 0xff;
+ mac[2*i+1] = (addr >> 8) & 0xff;
}
+ eth_hw_addr_set(dev, mac);
return 0;
}
@@ -666,14 +670,13 @@ static int pcmcia_osi_mac(struct pcmcia_device *p_dev,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleDataLen < 8)
return -EINVAL;
if (tuple->TupleData[0] != 0x04)
return -EINVAL;
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+
+ eth_hw_addr_set(dev, &tuple->TupleData[2]);
return 0;
};
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 813ea941b91a..a31c159e96ea 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1851,6 +1851,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
int retval;
unsigned int val, revision_register;
const char *version_string;
+ u8 addr[ETH_ALEN];
DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
@@ -1922,7 +1923,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
/* Get the MAC address */
SMC_SELECT_BANK(lp, 1);
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc_reset(dev);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 199a97339280..7a50ba00f8ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1503,7 +1503,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
/* Sets the device MAC address to dev_addr, called with mac_lock held */
static void
-smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
+smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, const u8 dev_addr[6])
{
u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
@@ -1939,7 +1939,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&pdata->mac_lock);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
@@ -2162,13 +2162,15 @@ static void smsc911x_read_mac_address(struct net_device *dev)
struct smsc911x_data *pdata = netdev_priv(dev);
u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ u8 addr[ETH_ALEN];
- dev->dev_addr[0] = (u8)(mac_low32);
- dev->dev_addr[1] = (u8)(mac_low32 >> 8);
- dev->dev_addr[2] = (u8)(mac_low32 >> 16);
- dev->dev_addr[3] = (u8)(mac_low32 >> 24);
- dev->dev_addr[4] = (u8)(mac_high16);
- dev->dev_addr[5] = (u8)(mac_high16 >> 8);
+ addr[0] = (u8)(mac_low32);
+ addr[1] = (u8)(mac_low32 >> 8);
+ addr[2] = (u8)(mac_low32 >> 16);
+ addr[3] = (u8)(mac_low32 >> 24);
+ addr[4] = (u8)(mac_high16);
+ addr[5] = (u8)(mac_high16 >> 8);
+ eth_hw_addr_set(dev, addr);
}
/* Initializing private device structures, only called from probe */
@@ -2375,7 +2377,7 @@ static int smsc911x_probe_config(struct smsc911x_platform_config *config,
phy_interface = PHY_INTERFACE_MODE_NA;
config->phy_interface = phy_interface;
- device_get_mac_address(dev, config->mac, ETH_ALEN);
+ device_get_mac_address(dev, config->mac);
err = device_property_read_u32(dev, "reg-io-width", &width);
if (err == -ENXIO)
@@ -2525,7 +2527,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(pdata, probe,
"MAC Address is specified by configuration");
} else if (is_valid_ether_addr(pdata->config.mac)) {
- memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pdata->config.mac);
SMSC_TRACE(pdata, probe,
"MAC Address specified by platform data");
} else {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fdbd2a43e267..d937af18973e 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -404,7 +404,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
static void smsc9420_set_mac_address(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
- u8 *dev_addr = dev->dev_addr;
+ const u8 *dev_addr = dev->dev_addr;
u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
@@ -416,6 +416,7 @@ static void smsc9420_set_mac_address(struct net_device *dev)
static void smsc9420_check_mac_address(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
+ u8 addr[ETH_ALEN];
/* Check if mac address has been specified when bringing interface up */
if (is_valid_ether_addr(dev->dev_addr)) {
@@ -427,15 +428,16 @@ static void smsc9420_check_mac_address(struct net_device *dev)
* it will already have been set */
u32 mac_high16 = smsc9420_reg_read(pd, ADDRH);
u32 mac_low32 = smsc9420_reg_read(pd, ADDRL);
- dev->dev_addr[0] = (u8)(mac_low32);
- dev->dev_addr[1] = (u8)(mac_low32 >> 8);
- dev->dev_addr[2] = (u8)(mac_low32 >> 16);
- dev->dev_addr[3] = (u8)(mac_low32 >> 24);
- dev->dev_addr[4] = (u8)(mac_high16);
- dev->dev_addr[5] = (u8)(mac_high16 >> 8);
-
- if (is_valid_ether_addr(dev->dev_addr)) {
+ addr[0] = (u8)(mac_low32);
+ addr[1] = (u8)(mac_low32 >> 8);
+ addr[2] = (u8)(mac_low32 >> 16);
+ addr[3] = (u8)(mac_low32 >> 24);
+ addr[4] = (u8)(mac_high16);
+ addr[5] = (u8)(mac_high16 >> 8);
+
+ if (is_valid_ether_addr(addr)) {
/* eeprom values are valid so use them */
+ eth_hw_addr_set(dev, addr);
netif_dbg(pd, probe, pd->dev,
"Mac Address is read from EEPROM\n");
} else {
@@ -788,7 +790,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
PKT_BUF_SZ, DMA_FROM_DEVICE);
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
dev_kfree_skb_any(skb);
- netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
+ netif_warn(pd, rx_err, pd->dev, "dma_map_single failed!\n");
return -ENOMEM;
}
@@ -940,7 +942,7 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
DMA_TO_DEVICE);
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
netif_warn(pd, tx_err, pd->dev,
- "pci_map_single failed, dropping packet\n");
+ "dma_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
}
@@ -1551,7 +1553,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!pd->rx_ring)
goto out_free_io_4;
- /* descriptors are aligned due to the nature of pci_alloc_consistent */
+ /* descriptors are aligned due to the nature of dma_alloc_coherent */
pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
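smsc9420_check_mac_address() shows a second benefit of the local-buffer style: the EEPROM bytes can be validated before they ever reach dev_addr, so an invalid address never becomes visible. A minimal sketch of the commit-or-fallback step, with the candidate bytes already read:

#include <linux/etherdevice.h>

/* 'candidate' holds the bytes just pulled from EEPROM */
static void commit_or_randomize(struct net_device *dev, const u8 *candidate)
{
	if (is_valid_ether_addr(candidate))
		eth_hw_addr_set(dev, candidate);	/* only valid bytes land in dev_addr */
	else
		eth_hw_addr_random(dev);	/* locally administered random fallback */
}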
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 1f46af136aa8..de7d8bf2c226 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1860,10 +1860,9 @@ static int netsec_of_probe(struct platform_device *pdev,
*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "phy_ref_clk not found\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "phy_ref_clk not found\n");
priv->freq = clk_get_rate(priv->clk);
return 0;
@@ -1886,19 +1885,17 @@ static int netsec_acpi_probe(struct platform_device *pdev,
priv->phy_interface = PHY_INTERFACE_MODE_NA;
ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
- if (ret) {
- dev_err(&pdev->dev,
- "missing required property 'phy-channel'\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "missing required property 'phy-channel'\n");
ret = device_property_read_u32(&pdev->dev,
"socionext,phy-clock-frequency",
&priv->freq);
if (ret)
- dev_err(&pdev->dev,
- "missing required property 'socionext,phy-clock-frequency'\n");
- return ret;
+ return dev_err_probe(&pdev->dev, ret,
+ "missing required property 'socionext,phy-clock-frequency'\n");
+ return 0;
}
static void netsec_unregister_mdio(struct netsec_priv *priv)
@@ -1981,7 +1978,6 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
static int netsec_probe(struct platform_device *pdev)
{
struct resource *mmio_res, *eeprom_res, *irq_res;
- u8 *mac, macbuf[ETH_ALEN];
struct netsec_priv *priv;
u32 hw_ver, phy_addr = 0;
struct net_device *ndev;
@@ -2037,21 +2033,19 @@ static int netsec_probe(struct platform_device *pdev)
goto free_ndev;
}
- mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
- if (mac)
- ether_addr_copy(ndev->dev_addr, mac);
-
- if (priv->eeprom_base &&
- (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ ret = device_get_ethdev_address(&pdev->dev, ndev);
+ if (ret && priv->eeprom_base) {
void __iomem *macp = priv->eeprom_base +
NETSEC_EEPROM_MAC_ADDRESS;
-
- ndev->dev_addr[0] = readb(macp + 3);
- ndev->dev_addr[1] = readb(macp + 2);
- ndev->dev_addr[2] = readb(macp + 1);
- ndev->dev_addr[3] = readb(macp + 0);
- ndev->dev_addr[4] = readb(macp + 7);
- ndev->dev_addr[5] = readb(macp + 6);
+ u8 addr[ETH_ALEN];
+
+ addr[0] = readb(macp + 3);
+ addr[1] = readb(macp + 2);
+ addr[2] = readb(macp + 1);
+ addr[3] = readb(macp + 0);
+ addr[4] = readb(macp + 7);
+ addr[5] = readb(macp + 6);
+ eth_hw_addr_set(ndev, addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
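device_get_ethdev_address() (and its OF twin of_get_ethdev_address(), used by sni_ave below) folds the old two-step of fetching into a scratch buffer and copying into one call that writes the netdev directly and returns an error when no usable address was found, which reduces the fallback to a plain `if (ret)` branch. A sketch, assuming the simplest possible fallback:

#include <linux/device.h>
#include <linux/etherdevice.h>

static int probe_mac(struct device *dev, struct net_device *ndev)
{
	int ret = device_get_ethdev_address(dev, ndev);

	if (ret)	/* no fwnode-provided address */
		eth_hw_addr_random(ndev);	/* netsec above tries EEPROM first */
	return 0;
}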
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index ae31ed93aaf0..2c48f8b8ab71 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1599,7 +1599,7 @@ static int ave_probe(struct platform_device *pdev)
ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
/* if the mac address is invalid, use random mac address */
eth_hw_addr_random(ndev);
@@ -1935,6 +1935,17 @@ static const struct ave_soc_data ave_pxs3_data = {
.get_pinmode = ave_pxs3_get_pinmode,
};
+static const struct ave_soc_data ave_nx1_data = {
+ .is_desc_64bit = true,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_pxs3_get_pinmode,
+};
+
static const struct of_device_id of_ave_match[] = {
{
.compatible = "socionext,uniphier-pro4-ave4",
@@ -1956,6 +1967,10 @@ static const struct of_device_id of_ave_match[] = {
.compatible = "socionext,uniphier-pxs3-ave4",
.data = &ave_pxs3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-ave4",
+ .data = &ave_nx1_data,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b6d945ea903d..9160f9ed363a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -546,13 +546,13 @@ int dwmac4_setup(struct stmmac_priv *priv);
int dwxgmac2_setup(struct stmmac_priv *priv);
int dwxlgmac2_setup(struct stmmac_priv *priv);
-void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
-void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4422baeed3d8..617d0e4c6495 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -634,7 +634,7 @@ static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable)
* If addr is NULL, clear the slot
*/
static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index d046e33b8a29..66fc8be34bb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -171,10 +171,9 @@ static int visconti_eth_clock_probe(struct platform_device *pdev,
int err;
dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(dwmac->phy_ref_clk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- return PTR_ERR(dwmac->phy_ref_clk);
- }
+ if (IS_ERR(dwmac->phy_ref_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->phy_ref_clk),
+ "phy_ref_clk clock not found.\n");
err = clk_prepare_enable(dwmac->phy_ref_clk);
if (err < 0) {
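Both netsec and visconti above switch to dev_err_probe(), which logs, records the reason for an -EPROBE_DEFER so it shows up in /sys/kernel/debug/devices_deferred, and returns the error in a single expression, silencing the log spam a deferred devm_clk_get() would otherwise produce. Sketch:

#include <linux/clk.h>
#include <linux/device.h>

static int get_ref_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "phy_ref_clk");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "phy_ref_clk clock not found.\n");
	return 0;
}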
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index fc8759f146c7..76edb9b72675 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -104,7 +104,7 @@ static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space)
}
static void dwmac1000_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index ebcad8dd99db..75071a7d551a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -68,7 +68,7 @@ static int dwmac100_irq_status(struct mac_device_info *hw,
}
static void dwmac100_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index b21745368983..fd41db65fe1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -322,7 +322,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr, unsigned int reg_n)
+ const unsigned char *addr, unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9292a1fab7d3..d1c605777985 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -187,7 +187,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index d1c31200bb91..caa4bfc4c1d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -239,7 +239,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index c4d78fa93663..c6c4d7948fe5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -335,7 +335,8 @@ static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
}
static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr, unsigned int reg_n)
+ const unsigned char *addr,
+ unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index fe2660d5694d..f7dc447f05a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -330,7 +330,8 @@ struct stmmac_ops {
/* Set power management mode (e.g. magic frame) */
void (*pmt)(struct mac_device_info *hw, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+ void (*set_umac_addr)(struct mac_device_info *hw,
+ const unsigned char *addr,
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3d67d1fa3690..d3f350c25b9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2818,9 +2818,13 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
*/
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
+ u8 addr[ETH_ALEN];
+
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
- stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
- if (!is_valid_ether_addr(priv->dev->dev_addr))
+ stmmac_get_umac_addr(priv, priv->hw, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
eth_hw_addr_random(priv->dev);
dev_info(priv->device, "device MAC address %pM\n",
priv->dev->dev_addr);
@@ -3510,6 +3514,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request Rx MSI irq */
for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
+ if (i >= MTL_MAX_RX_QUEUES)
+ break;
if (priv->rx_irq[i] == 0)
continue;
@@ -3533,6 +3539,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request Tx MSI irq */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ if (i >= MTL_MAX_TX_QUEUES)
+ break;
if (priv->tx_irq[i] == 0)
continue;
@@ -6815,7 +6823,7 @@ int stmmac_dvr_probe(struct device *device,
priv->tx_irq[i] = res->tx_irq[i];
if (!is_zero_ether_addr(res->mac))
- memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+ eth_hw_addr_set(priv->dev, res->mac);
dev_set_drvdata(device, priv->dev);
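The two bounds checks added to stmmac_request_irq_multi_msi() cap the loops at the size of the per-queue vector arrays (MTL_MAX_RX_QUEUES / MTL_MAX_TX_QUEUES entries) in case the platform reports more queues than the driver allocated vectors for; clamping trades the surplus queues' dedicated IRQs for memory safety. A generic sketch of the guard:

#include <linux/types.h>

/* request per-queue vectors, never indexing past the vector array */
static void request_queue_irqs(const int *irqs, size_t n_vecs,
			       unsigned int n_queues)
{
	unsigned int i;

	for (i = 0; i < n_queues; i++) {
		if (i >= n_vecs)
			break;		/* more queues reported than vectors allocated */
		if (!irqs[i])
			continue;	/* vector not wired up */
		/* request_irq(irqs[i], ...) would go here */
	}
}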
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 0462dcc93e53..be3cb63675a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -36,7 +36,7 @@ struct stmmac_packet_attrs {
int vlan_id_in;
int vlan_id_out;
unsigned char *src;
- unsigned char *dst;
+ const unsigned char *dst;
u32 ip_src;
u32 ip_dst;
int tcp;
@@ -249,8 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
struct net_device *orig_ndev)
{
struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ const unsigned char *dst = tpriv->packet->dst;
unsigned char *src = tpriv->packet->src;
- unsigned char *dst = tpriv->packet->dst;
struct stmmachdr *shdr;
struct ethhdr *ehdr;
struct udphdr *uhdr;
@@ -1104,13 +1104,13 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
goto cleanup_sel;
}
- actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
if (!actions) {
ret = -ENOMEM;
goto cleanup_exts;
}
- act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
+ act = kcalloc(nk, sizeof(*act), GFP_KERNEL);
if (!act) {
ret = -ENOMEM;
goto cleanup_actions;
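The kzalloc(nk * sizeof(*x)) to kcalloc(nk, sizeof(*x)) change in the selftests is the standard hardening against multiplication overflow: kcalloc() returns NULL when nk * size would wrap, where the open-coded multiply could quietly under-allocate. Equivalent sketch:

#include <linux/slab.h>

static u32 *alloc_table(size_t nk)
{
	/* fails cleanly (NULL) if nk * sizeof(u32) overflows size_t,
	 * instead of wrapping and returning a too-short buffer */
	return kcalloc(nk, sizeof(u32), GFP_KERNEL);
}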
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 287ae4c538aa..d2d4f47c7e28 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3027,7 +3027,7 @@ static void cas_mac_reset(struct cas *cp)
/* Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{
- unsigned char *e = &cp->dev->dev_addr[0];
+ const unsigned char *e = &cp->dev->dev_addr[0];
int i;
cas_mac_reset(cp);
@@ -3379,6 +3379,7 @@ static void cas_check_pci_invariants(struct cas *cp)
static int cas_check_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
+ u8 addr[ETH_ALEN];
u32 cfg;
int i;
@@ -3407,8 +3408,8 @@ static int cas_check_invariants(struct cas *cp)
/* finish phy determination. MDIO1 takes precedence over MDIO0 if
* they're both connected.
*/
- cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
- PCI_SLOT(pdev->devfn));
+ cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
+ eth_hw_addr_set(cp->dev, addr);
if (cp->phy_type & CAS_PHY_SERDES) {
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0; /* no more checking needed */
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 50bd4e3b0af9..6b59b14e74b1 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -230,7 +230,6 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
{
struct net_device *dev;
struct vnet_port *port;
- int i;
dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1);
if (!dev)
@@ -238,10 +237,8 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
dev->needed_headroom = VNET_PACKET_SKIP + 8;
dev->needed_tailroom = 8;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = hwaddr[i];
- dev->perm_addr[i] = dev->dev_addr[i];
- }
+ eth_hw_addr_set(dev, hwaddr);
+ ether_addr_copy(dev->perm_addr, dev->dev_addr);
sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a68a01d1b2b1..ba8ad76313a9 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2603,7 +2603,7 @@ static int niu_init_link(struct niu *np)
return 0;
}
-static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
+static void niu_set_primary_mac(struct niu *np, const unsigned char *addr)
{
u16 reg0 = addr[4] << 8 | addr[5];
u16 reg1 = addr[2] << 8 | addr[3];
@@ -6386,7 +6386,7 @@ static int niu_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev))
return 0;
@@ -8312,6 +8312,7 @@ static void niu_pci_vpd_validate(struct niu *np)
{
struct net_device *dev = np->dev;
struct niu_vpd *vpd = &np->vpd;
+ u8 addr[ETH_ALEN];
u8 val8;
if (!is_valid_ether_addr(&vpd->local_mac[0])) {
@@ -8344,17 +8345,20 @@ static void niu_pci_vpd_validate(struct niu *np)
return;
}
- memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
+ ether_addr_copy(addr, vpd->local_mac);
- val8 = dev->dev_addr[5];
- dev->dev_addr[5] += np->port;
- if (dev->dev_addr[5] < val8)
- dev->dev_addr[4]++;
+ val8 = addr[5];
+ addr[5] += np->port;
+ if (addr[5] < val8)
+ addr[4]++;
+
+ eth_hw_addr_set(dev, addr);
}
static int niu_pci_probe_sprom(struct niu *np)
{
struct net_device *dev = np->dev;
+ u8 addr[ETH_ALEN];
int len, i;
u64 val, sum;
u8 val8;
@@ -8446,27 +8450,29 @@ static int niu_pci_probe_sprom(struct niu *np)
val = nr64(ESPC_MAC_ADDR0);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
- dev->dev_addr[0] = (val >> 0) & 0xff;
- dev->dev_addr[1] = (val >> 8) & 0xff;
- dev->dev_addr[2] = (val >> 16) & 0xff;
- dev->dev_addr[3] = (val >> 24) & 0xff;
+ addr[0] = (val >> 0) & 0xff;
+ addr[1] = (val >> 8) & 0xff;
+ addr[2] = (val >> 16) & 0xff;
+ addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
- dev->dev_addr[4] = (val >> 0) & 0xff;
- dev->dev_addr[5] = (val >> 8) & 0xff;
+ addr[4] = (val >> 0) & 0xff;
+ addr[5] = (val >> 8) & 0xff;
- if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+ if (!is_valid_ether_addr(addr)) {
dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
- dev->dev_addr);
+ addr);
return -EINVAL;
}
- val8 = dev->dev_addr[5];
- dev->dev_addr[5] += np->port;
- if (dev->dev_addr[5] < val8)
- dev->dev_addr[4]++;
+ val8 = addr[5];
+ addr[5] += np->port;
+ if (addr[5] < val8)
+ addr[4]++;
+
+ eth_hw_addr_set(dev, addr);
val = nr64(ESPC_MOD_STR_LEN);
netif_printk(np, probe, KERN_DEBUG, np->dev,
@@ -9235,7 +9241,7 @@ static int niu_get_of_props(struct niu *np)
netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
dp, prop_len);
}
- memcpy(dev->dev_addr, mac_addr, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr);
if (!is_valid_ether_addr(&dev->dev_addr[0])) {
netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
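niu derives each port's address by adding the port index to the base MAC's last byte and propagating any carry into byte 4; doing the arithmetic in a local copy keeps the intermediate, possibly wrapped value out of dev_addr. Worked example: a base ending ..:ab:fe with port 3 gives addr[5] = 0x01 (0xfe + 3 wraps) and addr[4] bumped from 0xab to 0xac. Sketch:

#include <linux/etherdevice.h>

static void set_port_addr(struct net_device *dev, const u8 *base, u8 port)
{
	u8 addr[ETH_ALEN], prev;

	ether_addr_copy(addr, base);
	prev = addr[5];
	addr[5] += port;
	if (addr[5] < prev)	/* 8-bit wrap: carry into the next byte */
		addr[4]++;
	eth_hw_addr_set(dev, addr);
}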
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c646575e79d5..531a6f449afa 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -623,7 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, bool non_blocking)
void __iomem *cregs = bp->creg;
void __iomem *bregs = bp->bregs;
__u32 bblk_dvma = (__u32)bp->bblock_dvma;
- unsigned char *e = &bp->dev->dev_addr[0];
+ const unsigned char *e = &bp->dev->dev_addr[0];
/* Latch current counters into statistics. */
bigmac_get_counters(bp, bregs);
@@ -1076,7 +1076,6 @@ static int bigmac_ether_init(struct platform_device *op,
struct net_device *dev;
u8 bsizes, bsizes_more;
struct bigmac *bp;
- int i;
/* Get a new device struct for this interface. */
dev = alloc_etherdev(sizeof(struct bigmac));
@@ -1086,8 +1085,7 @@ static int bigmac_ether_init(struct platform_device *op,
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index d72018a60c0f..036856102c50 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1810,7 +1810,7 @@ static u32 gem_setup_multicast(struct gem *gp)
static void gem_init_mac(struct gem *gp)
{
- unsigned char *e = &gp->dev->dev_addr[0];
+ const unsigned char *e = &gp->dev->dev_addr[0];
writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
@@ -2087,7 +2087,7 @@ static void gem_stop_phy(struct gem *gp, int wol)
writel(mifcfg, gp->regs + MIF_CFG);
if (wol && gp->has_wol) {
- unsigned char *e = &gp->dev->dev_addr[0];
+ const unsigned char *e = &gp->dev->dev_addr[0];
u32 csr;
/* Setup wake-on-lan for MAGIC packet */
@@ -2431,13 +2431,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
static int gem_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *macaddr = (struct sockaddr *) addr;
+ const unsigned char *e = &dev->dev_addr[0];
struct gem *gp = netdev_priv(dev);
- unsigned char *e = &dev->dev_addr[0];
if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, macaddr->sa_data);
/* We'll just catch it later when the device is up'd or resumed */
if (!netif_running(dev) || !netif_device_present(dev))
@@ -2797,9 +2797,12 @@ static int gem_get_device_address(struct gem *gp)
return -1;
#endif
}
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
#else
- get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
+ u8 addr[ETH_ALEN];
+
+ get_gem_mac_nonobp(gp->pdev, addr);
+ eth_hw_addr_set(gp->dev, addr);
#endif
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 62f81b0d14ed..ad9029ae6848 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1395,13 +1395,13 @@ force_link:
/* hp->happy_lock must be held */
static int happy_meal_init(struct happy_meal *hp)
{
+ const unsigned char *e = &hp->dev->dev_addr[0];
void __iomem *gregs = hp->gregs;
void __iomem *etxregs = hp->etxregs;
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
u32 regtmp, rxcfg;
- unsigned char *e = &hp->dev->dev_addr[0];
/* If auto-negotiation timer is running, kill it. */
del_timer(&hp->happy_timer);
@@ -2661,6 +2661,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
struct happy_meal *hp;
struct net_device *dev;
int i, qfe_slot = -1;
+ u8 addr[ETH_ALEN];
int err = -ENODEV;
sbus_dp = op->dev.parent->of_node;
@@ -2698,7 +2699,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
}
if (i < 6) { /* a mac address was given */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
} else {
const unsigned char *addr;
@@ -2707,9 +2709,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
addr = of_get_property(dp, "local-mac-address", &len);
if (qfe_slot != -1 && addr && len == ETH_ALEN)
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
else
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
}
hp = netdev_priv(dev);
@@ -2969,6 +2971,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
unsigned long hpreg_res;
int i, qfe_slot = -1;
char prom_name[64];
+ u8 addr[ETH_ALEN];
int err;
/* Now make sure pci_dev cookie is there. */
@@ -3044,7 +3047,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
}
if (i < 6) { /* a mac address was given */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
} else {
#ifdef CONFIG_SPARC
@@ -3055,12 +3059,15 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
(addr = of_get_property(dp, "local-mac-address", &len))
!= NULL &&
len == 6) {
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
} else {
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
}
#else
- get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
+ u8 addr[ETH_ALEN];
+
+ get_hme_mac_nonsparc(pdev, addr);
+ eth_hw_addr_set(dev, addr);
#endif
}
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 577cd9753d8e..efe0d33f6024 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -144,7 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
void __iomem *cregs = qep->qcregs;
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
- unsigned char *e = &qep->dev->dev_addr[0];
+ const unsigned char *e = &qep->dev->dev_addr[0];
__u32 qblk_dvma = (__u32)qep->qblock_dvma;
u32 tmp;
int i;
@@ -844,7 +844,7 @@ static int qec_ether_init(struct platform_device *op)
if (!dev)
return -ENOMEM;
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
qe = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 58ee89223951..da8119625cf3 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -285,6 +285,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
struct vio_dev *vdev)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
struct vnet *vp;
int err, i;
@@ -295,7 +296,8 @@ static struct vnet *vnet_new(const u64 *local_mac,
dev->needed_tailroom = 8;
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+ addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+ eth_hw_addr_set(dev, addr);
vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index df26cea45904..5c9b6c90942b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -78,7 +78,7 @@ static int xlgmac_init(struct xlgmac_pdata *pdata)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->mac_regs;
xlgmac_read_mac_addr(pdata);
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->mac_addr);
/* Set all the function pointers */
xlgmac_init_all_ops(pdata);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index bf6c1c6779ff..76eb7db80f13 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -57,7 +57,7 @@ static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
return 0;
}
-static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
+static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 1db7104fef3a..d435519236e4 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -798,7 +798,7 @@ static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, saddr->sa_data);
hw_ops->set_mac_address(pdata, netdev->dev_addr);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
index 8598aaf3ec99..98e3a271e017 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -410,7 +410,7 @@ struct xlgmac_hw_ops {
void (*dev_xmit)(struct xlgmac_channel *channel);
int (*dev_read)(struct xlgmac_channel *channel);
- int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
+ int (*set_mac_address)(struct xlgmac_pdata *pdata, const u8 *addr);
int (*config_rx_mode)(struct xlgmac_pdata *pdata);
int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6b409f9c5863..0775a5542f2f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -832,7 +832,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p)
if (netif_running(dev))
return -EBUSY
*/
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
bdx_restore_mac(ndev, priv);
RET(0);
}
@@ -840,6 +840,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p)
static int bdx_read_mac(struct bdx_priv *priv)
{
u16 macAddress[3], i;
+ u8 addr[ETH_ALEN];
ENTER;
macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
@@ -849,9 +850,10 @@ static int bdx_read_mac(struct bdx_priv *priv)
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
for (i = 0; i < 3; i++) {
- priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
- priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
+ addr[i * 2 + 1] = macAddress[i];
+ addr[i * 2] = macAddress[i] >> 8;
}
+ eth_hw_addr_set(priv->ndev, addr);
RET(0);
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 6e4d4f9e32e0..b05de9b61ad6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -61,7 +61,7 @@ struct am65_cpsw_regdump_item {
#define AM65_CPSW_REGDUMP_REC(mod, start, end) { \
.hdr.module_id = (mod), \
- .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \
+ .hdr.len = (end + 4 - start) * 2 + \
sizeof(struct am65_cpsw_regdump_hdr), \
.start_ofs = (start), \
.end_ofs = end, \
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 130346f74ee8..c092cb61416a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1918,7 +1918,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
- random_ether_addr(port->slave.mac_addr);
+ eth_random_addr(port->slave.mac_addr);
dev_err(dev, "Use random MAC address\n");
}
}
@@ -1970,7 +1970,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
ndev_priv->msg_enable = AM65_CPSW_DEBUG;
SET_NETDEV_DEV(port->ndev, dev);
- ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);
+ eth_hw_addr_set(port->ndev, port->slave.mac_addr);
port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
@@ -2429,12 +2429,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
dl_priv = devlink_priv(common->devlink);
dl_priv->common = common;
- ret = devlink_register(common->devlink);
- if (ret) {
- dev_err(dev, "devlink reg fail ret:%d\n", ret);
- goto dl_free;
- }
-
/* Provide devlink hook to switch mode when multiple external ports
* are present NUSS switchdev driver is enabled.
*/
@@ -2447,7 +2441,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
dev_err(dev, "devlink params reg fail ret:%d\n", ret);
goto dl_unreg;
}
- devlink_params_publish(common->devlink);
}
for (i = 1; i <= common->port_num; i++) {
@@ -2468,7 +2461,7 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
}
devlink_port_type_eth_set(dl_port, port->ndev);
}
-
+ devlink_register(common->devlink);
return ret;
dl_port_unreg:
@@ -2479,10 +2472,7 @@ dl_port_unreg:
devlink_port_unregister(dl_port);
}
dl_unreg:
- devlink_unregister(common->devlink);
-dl_free:
devlink_free(common->devlink);
-
return ret;
}
@@ -2492,6 +2482,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
struct am65_cpsw_port *port;
int i;
+ devlink_unregister(common->devlink);
+
for (i = 1; i <= common->port_num; i++) {
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
@@ -2500,13 +2492,11 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
}
if (!AM65_CPSW_IS_CPSW2G(common) &&
- IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
- devlink_params_unpublish(common->devlink);
- devlink_params_unregister(common->devlink, am65_cpsw_devlink_params,
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ devlink_params_unregister(common->devlink,
+ am65_cpsw_devlink_params,
ARRAY_SIZE(am65_cpsw_devlink_params));
- }
- devlink_unregister(common->devlink);
devlink_free(common->devlink);
}
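
The devlink rework in these am65-cpsw hunks follows the new core rule from this series: devlink_register() now returns void and must be the last step of setup, so the instance only becomes visible to user space once all sub-objects (ports, params) exist; teardown mirrors this by calling devlink_unregister() first. The explicit devlink_params_publish()/unpublish() calls go away because parameters registered before devlink_register() are visible automatically. A simplified sketch, assuming a params array like the driver's:

    static int example_devlink_setup(struct devlink *dl)
    {
            int err;

            err = devlink_params_register(dl, params, ARRAY_SIZE(params));
            if (err)
                    return err;

            devlink_register(dl); /* returns void; must come last */
            return 0;
    }

    static void example_devlink_teardown(struct devlink *dl)
    {
            devlink_unregister(dl); /* hide from user space first */
            devlink_params_unregister(dl, params, ARRAY_SIZE(params));
            devlink_free(dl);
    }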
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 02d4e51f7306..7449436fc87c 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1112,7 +1112,7 @@ static int cpmac_probe(struct platform_device *pdev)
priv->dev = dev;
priv->ring_size = 64;
priv->msg_enable = netif_msg_init(debug_level, 0xff);
- memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
+ eth_hw_addr_set(dev, pdata->dev_addr);
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 66f7ddd9b1f9..33142d505fc8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -985,7 +985,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv->mac_addr);
for_each_slave(priv, cpsw_set_slave_mac, priv);
pm_runtime_put(cpsw->dev);
@@ -1460,7 +1460,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
priv_sl2->mac_addr);
}
- memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv_sl2->mac_addr);
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
@@ -1639,7 +1639,7 @@ static int cpsw_probe(struct platform_device *pdev)
dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
}
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv->mac_addr);
cpsw->slaves[0].ndev = ndev;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 7968f24d99c8..279e261e4720 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1000,7 +1000,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags, vid);
ether_addr_copy(priv->mac_addr, addr->sa_data);
- ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ eth_hw_addr_set(ndev, priv->mac_addr);
cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
pm_runtime_put(cpsw->dev);
@@ -1401,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
dev_info(cpsw->dev, "Random MACID = %pM\n",
priv->mac_addr);
}
- ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
+ eth_hw_addr_set(ndev, slave_data->mac_addr);
ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
cpsw->slaves[i].ndev = ndev;
@@ -1810,12 +1810,6 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw)
dl_priv = devlink_priv(cpsw->devlink);
dl_priv->cpsw = cpsw;
- ret = devlink_register(cpsw->devlink);
- if (ret) {
- dev_err(dev, "DL reg fail ret:%d\n", ret);
- goto dl_free;
- }
-
ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
ARRAY_SIZE(cpsw_devlink_params));
if (ret) {
@@ -1823,22 +1817,19 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw)
goto dl_unreg;
}
- devlink_params_publish(cpsw->devlink);
+ devlink_register(cpsw->devlink);
return ret;
dl_unreg:
- devlink_unregister(cpsw->devlink);
-dl_free:
devlink_free(cpsw->devlink);
return ret;
}
static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
{
- devlink_params_unpublish(cpsw->devlink);
+ devlink_unregister(cpsw->devlink);
devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
ARRAY_SIZE(cpsw_devlink_params));
- devlink_unregister(cpsw->devlink);
devlink_free(cpsw->devlink);
}
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 43222a34cba0..dc70a6bfaa6a 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -669,10 +669,10 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- parent_names = devm_kzalloc(cpts->dev, (sizeof(char *) * num_parents),
- GFP_KERNEL);
+ parent_names = devm_kcalloc(cpts->dev, num_parents,
+ sizeof(*parent_names), GFP_KERNEL);
- mux_table = devm_kzalloc(cpts->dev, sizeof(*mux_table) * num_parents,
+ mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
GFP_KERNEL);
if (!mux_table || !parent_names) {
ret = -ENOMEM;
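
The cpts hunk swaps open-coded multiplications inside devm_kzalloc() for devm_kcalloc(): the same zeroed, device-managed allocation, but with an overflow-checked n * size computation that returns NULL instead of silently allocating a short buffer when the product wraps. In sketch form:

    /* may wrap if num_parents is large or externally controlled: */
    buf = devm_kzalloc(dev, sizeof(*buf) * num_parents, GFP_KERNEL);

    /* overflow-checked equivalent: */
    buf = devm_kcalloc(dev, num_parents, sizeof(*buf), GFP_KERNEL);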
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index e8291d848839..2d2dcf70563f 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1132,7 +1132,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
- memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, sa->sa_data);
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
@@ -1402,7 +1402,6 @@ static int match_first_device(struct device *dev, const void *data)
static int emac_dev_open(struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
- u32 cnt;
struct resource *res;
int q, m, ret;
int res_num = 0, irq_num = 0;
@@ -1420,8 +1419,7 @@ static int emac_dev_open(struct net_device *ndev)
}
netif_carrier_off(ndev);
- for (cnt = 0; cnt < ETH_ALEN; cnt++)
- ndev->dev_addr[cnt] = priv->mac_addr[cnt];
+ eth_hw_addr_set(ndev, priv->mac_addr);
/* Configuration items */
priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
@@ -1899,7 +1897,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
if (!rc)
- ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ eth_hw_addr_set(ndev, priv->mac_addr);
if (!is_valid_ether_addr(priv->mac_addr)) {
/* Use random MAC if still none obtained. */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index eda2961c0fe2..b818e4579f6f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2028,16 +2028,16 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
if (is_valid_ether_addr(efuse_mac_addr))
- ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
+ eth_hw_addr_set(ndev, efuse_mac_addr);
else
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
} else {
- ret = of_get_mac_address(node_interface, ndev->dev_addr);
+ ret = of_get_ethdev_address(node_interface, ndev);
if (ret)
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
}
ret = of_property_read_string(node_interface, "rx-channel",
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 77c448ad67ce..741c42c6a417 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -184,7 +184,7 @@ static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
-static void tlan_set_mac(struct net_device *, int areg, char *mac);
+static void tlan_set_mac(struct net_device *, int areg, const char *mac);
static void __tlan_phy_print(struct net_device *);
static void tlan_phy_print(struct net_device *);
@@ -817,6 +817,7 @@ static int tlan_init(struct net_device *dev)
int err;
int i;
struct tlan_priv *priv;
+ u8 addr[ETH_ALEN];
priv = netdev_priv(dev);
@@ -842,7 +843,7 @@ static int tlan_init(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i++)
err |= tlan_ee_read_byte(dev,
(u8) priv->adapter->addr_ofs + i,
- (u8 *) &dev->dev_addr[i]);
+ addr + i);
if (err) {
pr_err("%s: Error reading MAC from eeprom: %d\n",
dev->name, err);
@@ -850,11 +851,12 @@ static int tlan_init(struct net_device *dev)
/* Olicom OC-2325/OC-2326 have the address byte-swapped */
if (priv->adapter->addr_ofs == 0xf8) {
for (i = 0; i < ETH_ALEN; i += 2) {
- char tmp = dev->dev_addr[i];
- dev->dev_addr[i] = dev->dev_addr[i + 1];
- dev->dev_addr[i + 1] = tmp;
+ char tmp = addr[i];
+ addr[i] = addr[i + 1];
+ addr[i + 1] = tmp;
}
}
+ eth_hw_addr_set(dev, addr);
netif_carrier_off(dev);
@@ -2346,7 +2348,7 @@ tlan_finish_reset(struct net_device *dev)
*
**************************************************************/
-static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
+static void tlan_set_mac(struct net_device *dev, int areg, const char *mac)
{
int i;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 55e652624bd7..3dbfb1b20649 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1477,7 +1477,7 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
__func__, status);
return -EINVAL;
}
- memcpy(netdev->dev_addr, &v1, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)&v1);
if (card->vlan_required) {
netdev->hard_header_len += VLAN_HLEN;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 66d4e024d11e..f50f9a43d3ea 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1296,7 +1296,7 @@ spider_net_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
/* switch off GMACTPE and GMACRPE */
regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 52245ac60fc7..ce38f7515225 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -708,7 +708,7 @@ static int tc35815_read_plat_dev_addr(struct net_device *dev)
lp->pci_dev, tc35815_mac_match);
if (pd) {
if (pd->platform_data)
- memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->platform_data);
put_device(pd);
return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
}
@@ -725,6 +725,7 @@ static int tc35815_init_dev_addr(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
+ u8 addr[ETH_ALEN];
int i;
while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
@@ -735,9 +736,10 @@ static int tc35815_init_dev_addr(struct net_device *dev)
while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
;
data = tc_readl(&tr->PROM_Data);
- dev->dev_addr[i] = data & 0xff;
- dev->dev_addr[i+1] = data >> 8;
+ addr[i] = data & 0xff;
+ addr[i+1] = data >> 8;
}
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr))
return tc35815_read_plat_dev_addr(dev);
return 0;
@@ -1859,7 +1861,8 @@ static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
+static void tc35815_set_cam_entry(struct net_device *dev, int index,
+ const unsigned char *addr)
{
struct tc35815_local *lp = netdev_priv(dev);
struct tc35815_regs __iomem *tr =
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 3b73a9c55a5a..509c5e9b29df 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -899,6 +899,7 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
struct net_device *dev;
struct rhine_private *rp;
int i, rc, phy_id;
+ u8 addr[ETH_ALEN];
const char *name;
/* this should always be supported */
@@ -933,7 +934,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
rhine_hw_init(dev, pioaddr);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+ addr[i] = ioread8(ioaddr + StationAddr + i);
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
/* Report it and use a random ethernet address instead */
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4b9c30f735b5..be2b992f24d9 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2767,6 +2767,7 @@ static int velocity_probe(struct device *dev, int irq,
struct velocity_info *vptr;
struct mac_regs __iomem *regs;
int ret = -ENOMEM;
+ u8 addr[ETH_ALEN];
/* FIXME: this driver, like almost all other ethernet drivers,
* can support more than MAX_UNITS.
@@ -2820,7 +2821,8 @@ static int velocity_probe(struct device *dev, int irq,
mac_wol_reset(regs);
for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = readb(&regs->PAR[i]);
+ addr[i] = readb(&regs->PAR[i]);
+ eth_hw_addr_set(netdev, addr);
velocity_get_options(&vptr->options, velocity_nics);
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 2b84848dc26a..7779a36da3c8 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -463,7 +463,9 @@ static int w5100_spi_probe(struct spi_device *spi)
static int w5100_spi_remove(struct spi_device *spi)
{
- return w5100_remove(&spi->dev);
+ w5100_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id w5100_spi_ids[] = {
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index f974e70a82e8..ae24d6b86803 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -985,7 +985,7 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, sock_addr->sa_data);
w5100_write_macaddr(priv);
return 0;
}
@@ -1064,7 +1064,9 @@ static int w5100_mmio_probe(struct platform_device *pdev)
static int w5100_mmio_remove(struct platform_device *pdev)
{
- return w5100_remove(&pdev->dev);
+ w5100_remove(&pdev->dev);
+
+ return 0;
}
void *w5100_ops_priv(const struct net_device *ndev)
@@ -1155,7 +1157,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
INIT_WORK(&priv->restart_work, w5100_restart_work);
if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, mac_addr);
else
eth_hw_addr_random(ndev);
@@ -1210,7 +1212,7 @@ err_register:
}
EXPORT_SYMBOL_GPL(w5100_probe);
-int w5100_remove(struct device *dev)
+void w5100_remove(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
@@ -1226,7 +1228,6 @@ int w5100_remove(struct device *dev)
unregister_netdev(ndev);
free_netdev(ndev);
- return 0;
}
EXPORT_SYMBOL_GPL(w5100_remove);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
index 5d3d4b541fec..481af3b6d9e8 100644
--- a/drivers/net/ethernet/wiznet/w5100.h
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -31,6 +31,6 @@ void *w5100_ops_priv(const struct net_device *ndev);
int w5100_probe(struct device *dev, const struct w5100_ops *ops,
int sizeof_ops_priv, const void *mac_addr, int irq,
int link_gpio);
-int w5100_remove(struct device *dev);
+void w5100_remove(struct device *dev);
extern const struct dev_pm_ops w5100_pm_ops;
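
The w5100 conversion makes the shared w5100_remove() helper return void: its callers are bus-level .remove() hooks whose error codes the SPI and platform cores do not act on (the device is removed regardless), so the shims now just call the helper and return 0. The same pattern, sketched for a hypothetical bus glue:

    static int example_bus_remove(struct example_device *edev)
    {
            w5100_remove(&edev->dev); /* core teardown has no failure path */
            return 0; /* the bus core ignores this value anyway */
    }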
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 46aae30c4636..402d5036f266 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -472,7 +472,7 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, sock_addr->sa_data);
w5300_write_macaddr(priv);
return 0;
}
@@ -534,7 +534,7 @@ static int w5300_hw_probe(struct platform_device *pdev)
int ret;
if (data && is_valid_ether_addr(data->mac_addr)) {
- memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, data->mac_addr);
} else {
eth_hw_addr_random(ndev);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 463094ced104..e7065c9a8e38 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ eth_hw_addr_set(ndev, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
temac_do_set_mac_address(ndev);
@@ -451,7 +451,7 @@ static int temac_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
temac_do_set_mac_address(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 871b5ec3183d..0b7606987c1e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -360,7 +360,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ eth_hw_addr_set(ndev, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b780aad3550a..0815de581c7f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -206,12 +206,13 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* This function writes data from a 16-bit aligned buffer to a 32-bit aligned
* address in the EmacLite device.
*/
-static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
+static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
unsigned length)
{
+ const u16 *from_u16_ptr;
u32 align_buffer;
u32 *to_u32_ptr;
- u16 *from_u16_ptr, *to_u16_ptr;
+ u16 *to_u16_ptr;
to_u32_ptr = dest_ptr;
from_u16_ptr = src_ptr;
@@ -470,7 +471,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* buffers (if configured).
*/
static void xemaclite_update_address(struct net_local *drvdata,
- u8 *address_ptr)
+ const u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
@@ -511,7 +512,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
xemaclite_update_address(lp, dev->dev_addr);
return 0;
}
@@ -1157,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
- rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ rc = of_get_ethdev_address(ofdev->dev.of_node, ndev);
if (rc) {
dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index ae611e46da6a..f9587e55b842 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -671,7 +671,6 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleDataLen != 13)
return -EINVAL;
@@ -679,8 +678,7 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
(tuple->TupleData[2] != 6))
return -EINVAL;
/* another try (James Lehmer's CE2 version 4.1)*/
- for (i = 2; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+ dev_addr_mod(dev, 2, &tuple->TupleData[2], 4);
return 0;
};
@@ -742,11 +740,9 @@ xirc2ps_config(struct pcmcia_device * link)
len = pcmcia_get_tuple(link, 0x89, &buf);
/* data layout looks like tuple 0x22 */
if (buf && len == 8) {
- if (*buf == CISTPL_FUNCE_LAN_NODE_ID) {
- int i;
- for (i = 2; i < 6; i++)
- dev->dev_addr[i] = buf[i+2];
- } else
+ if (*buf == CISTPL_FUNCE_LAN_NODE_ID)
+ dev_addr_mod(dev, 2, &buf[2], 4);
+ else
err = -1;
}
kfree(buf);
@@ -1271,7 +1267,7 @@ struct set_address_info {
unsigned int ioaddr;
};
-static void set_address(struct set_address_info *sa_info, char *addr)
+static void set_address(struct set_address_info *sa_info, const char *addr)
{
unsigned int ioaddr = sa_info->ioaddr;
int i;
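
dev_addr_mod(), used in the xirc2ps hunks above, overwrites a sub-range of the device address: dev_addr_mod(dev, offset, buf, len) copies len bytes from buf into dev_addr starting at offset, replacing the old open-coded byte loops. At this stage of the conversion the helper is roughly:

    static inline void dev_addr_mod(struct net_device *dev,
                                    unsigned int offset,
                                    const void *addr, size_t len)
    {
            memcpy(&dev->dev_addr[offset], addr, len);
    }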
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 931494cc1c39..65fdad1107fc 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1103,10 +1103,9 @@ static int init_queues(struct port *port)
return -ENOMEM;
}
- if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
- &port->desc_tab_phys)))
+ port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys);
+ if (!port->desc_tab)
return -ENOMEM;
- memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
@@ -1524,7 +1523,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);
+ eth_hw_addr_set(ndev, plat->hwaddr);
platform_set_drvdata(pdev, ndev);
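
The init_queues() hunk above replaces a dma_pool_alloc() + memset() pair with dma_pool_zalloc(), which is simply dma_pool_alloc() with __GFP_ZERO or'd into the flags, so the descriptor block comes back already cleared:

    /* before: allocate, then zero by hand */
    desc = dma_pool_alloc(pool, GFP_KERNEL, &phys);
    memset(desc, 0, POOL_ALLOC_SIZE);

    /* after: one call, returns a zeroed block (or NULL) */
    desc = dma_pool_zalloc(pool, GFP_KERNEL, &phys);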
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 6d1e3f49a3d3..5810e8473789 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1028,7 +1028,7 @@ static void dfx_bus_config_check(DFX_board_t *bp)
* or read adapter MAC address
*
* Assumptions:
- * Memory allocated from pci_alloc_consistent() call is physically
+ * Memory allocated from dma_alloc_coherent() call is physically
* contiguous, locked memory.
*
* Side Effects:
@@ -3249,7 +3249,7 @@ static void dfx_rcv_queue_process(
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
- * by using pci_map_single().
+ * by using dma_map_single().
*
* Since the adapter architecture requires a three byte
* packet request header to prepend the start of packet,
@@ -3402,7 +3402,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
* skb->data.
* 6. The physical address of the start of packet
* can be determined from the virtual address
- * by using pci_map_single() and is only 32-bits
+ * by using dma_map_single() and is only 32-bits
* wide.
*/
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index c5cb421f9890..652cb174302e 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -78,6 +78,7 @@ static const char * const boot_msg =
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@@ -433,7 +434,7 @@ static int skfp_driver_init(struct net_device *dev)
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
+ eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
smt_reset_defaults(smc, 0);
@@ -500,7 +501,7 @@ static int skfp_open(struct net_device *dev)
* address.
*/
read_address(smc, NULL);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
+ eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
init_smt(smc, NULL);
smt_online(smc, 1);
@@ -1012,7 +1013,7 @@ static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
- * by using pci_map_single().
+ * by using dma_map_single().
*
* We have an internal queue for packets we can not send
* immediately. Packets in this queue can be given to the
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 30e0a10595a1..24e5c54d06c1 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -539,7 +539,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}
- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
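
skb_dst_update_pmtu_no_confirm() in the gtp hunk is the standard tunnel-driver helper for this dst operation: it performs the same update_pmtu call, but with confirm_neigh=false, so learning a lower path MTU does not spuriously confirm the neighbour entry. Its definition is essentially the open-coded call it replaces:

    static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb,
                                                      u32 mtu)
    {
            struct dst_entry *dst = skb_dst(skb);

            if (dst && dst->ops->update_pmtu)
                    dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
    }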
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 6192244b304a..f4e8793e995d 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -288,7 +288,7 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
- memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
@@ -317,7 +317,7 @@ static void sp_setup(struct net_device *dev)
/* Only activated in AX.25 mode */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = 0;
}
@@ -726,7 +726,7 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
}
netif_tx_lock_bh(dev);
- memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
break;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 6b6f28d5b8d5..a03d0b474641 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -791,7 +791,7 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -1159,7 +1159,7 @@ static void baycom_probe(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&null_ax25_address);
dev->tx_queue_len = 16;
/* New style flags */
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d967b0748773..30af0081e2be 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -302,7 +302,7 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *)addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -457,9 +457,6 @@ static void bpq_setup(struct net_device *dev)
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
-
dev->flags = 0;
dev->features = NETIF_F_LLTX; /* Allow recursion */
@@ -472,6 +469,8 @@ static void bpq_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
+ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/*
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index f4c3efc3e074..7e527499d3ad 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -426,7 +426,7 @@ static void __init dev_setup(struct net_device *dev)
dev->addr_len = AX25_ADDR_LEN;
dev->tx_queue_len = 64;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
static const struct net_device_ops scc_netdev_ops = {
@@ -956,8 +956,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
static int scc_set_mac_address(struct net_device *dev, void *sa)
{
- memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
- dev->addr_len);
+ dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
return 0;
}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 5805cfc83854..b0edb91bb10a 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -415,7 +415,7 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -675,7 +675,7 @@ static void hdlcdrv_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->tx_queue_len = 16;
}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 8666110bec55..867252a0247b 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -344,7 +344,7 @@ static int ax_set_mac_address(struct net_device *dev, void *addr)
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
- memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
@@ -647,7 +647,7 @@ static void ax_setup(struct net_device *dev)
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
@@ -850,7 +850,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
}
netif_tx_lock_bh(dev);
- memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
+ __dev_addr_set(dev, addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index e0bb131a33d7..3d59dac063ac 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1563,9 +1563,6 @@ static void scc_net_setup(struct net_device *dev)
dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
-
dev->flags = 0;
dev->type = ARPHRD_AX25;
@@ -1573,6 +1570,8 @@ static void scc_net_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
+ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/* ----> open network device <---- */
@@ -1951,7 +1950,7 @@ static int scc_net_siocdevprivate(struct net_device *dev,
static int scc_net_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *) addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 6ddacbdb224b..6376b8485976 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1063,7 +1063,7 @@ static int yam_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *) addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -1107,7 +1107,7 @@ static void yam_setup(struct net_device *dev)
dev->mtu = AX25_MTU;
dev->addr_len = AX25_ADDR_LEN;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
static int __init yam_init_driver(void)
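
The hamradio drivers use the non-Ethernet spellings of the new address helpers: dev_addr_set(dev, addr) copies dev->addr_len bytes (AX25_ADDR_LEN here), while __dev_addr_set(dev, addr, len) takes an explicit length and a void pointer for callers passing non-u8 payloads such as AX.25 call signs. During this transition the helpers are thin wrappers, roughly:

    static inline void __dev_addr_set(struct net_device *dev,
                                      const void *addr, size_t len)
    {
            memcpy(dev->dev_addr, addr, len);
    }

    static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
    {
            __dev_addr_set(dev, addr, dev->addr_len);
    }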
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 382bebc2420d..7e66ae1d2a59 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -803,6 +803,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+/* This function should only be called after skb_record_rx_queue() */
static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -2536,7 +2537,7 @@ static int netvsc_probe(struct hv_device *dev,
goto rndis_failed;
}
- memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
+ eth_hw_addr_set(net, device_info->mac_adr);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2742,8 +2743,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
- if ((event_dev->priv_flags & IFF_BONDING) &&
- (event_dev->flags & IFF_MASTER))
+ if (netif_is_bond_master(event_dev))
return NOTIFY_DONE;
switch (event) {
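
netif_is_bond_master() in the netvsc hunk is the canonical test for exactly the open-coded flag pair it replaces:

    static inline bool netif_is_bond_master(const struct net_device *dev)
    {
            return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
    }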
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 3a2824f24caa..ece6ff6049f6 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2938,9 +2938,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
*/
static void ca8210_dev_com_clear(struct ca8210_priv *priv)
{
- flush_workqueue(priv->mlme_workqueue);
destroy_workqueue(priv->mlme_workqueue);
- flush_workqueue(priv->irq_workqueue);
destroy_workqueue(priv->irq_workqueue);
}
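
The ca8210 hunk drops flush_workqueue() calls that immediately precede destroy_workqueue(): destroying a workqueue already drains all queued and in-flight work before freeing it, so the explicit flush is redundant.

    /* drains pending work itself; no flush_workqueue() needed first */
    destroy_workqueue(wq);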
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index e9258a9f3702..2c319dd27f29 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/netfilter_netdev.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -75,8 +76,10 @@ static void ifb_ri_tasklet(struct tasklet_struct *t)
}
while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
+ /* Skip tc and netfilter to prevent redirection loop. */
skb->redirected = 0;
skb->tc_skip_classify = 1;
+ nf_skip_egress(skb, true);
u64_stats_update_begin(&txp->tsync);
txp->tx_packets++;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index c0b21a5580d5..1d2f4e7d7324 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -579,7 +579,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
* world but keep using the physical-dev address for the outgoing
* packets.
*/
- memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, phy_dev->dev_addr);
dev->priv_flags |= IFF_NO_RX_HANDLER;
@@ -787,7 +787,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_CHANGEADDR:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
+ eth_hw_addr_set(ipvlan->dev, dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
}
break;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1cedb634f4f7..ef02f2cf5ce1 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -162,7 +162,7 @@ static int ipvtap_device_event(struct notifier_block *unused,
devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
classdev = device_create(&ipvtap_class, &dev->dev, devt,
- dev, tap_name);
+ dev, "%s", tap_name);
if (IS_ERR(classdev)) {
tap_free_minor(ipvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
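
The ipvtap change (and the identical macvtap one further down) hardens a classic format-string pattern: tap_name is derived from the network device name, so passing it directly as the format argument would let a '%' in an interface name be interpreted as a conversion specifier. Routing it through "%s" makes it pure data:

    /* unsafe if name can contain '%': device_create(..., dev, name); */
    classdev = device_create(class, parent, devt, drvdata, "%s", name);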
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 93dc48b9b4f2..18b6dba9394e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3614,7 +3614,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
dev_uc_del(real_dev, dev->dev_addr);
out:
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 35f46ad040b0..6189acb33973 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -202,7 +202,7 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
/* Now that we are unhashed it is safe to change the device
* address without confusing packet delivery.
*/
- memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(vlan->dev, addr);
macvlan_hash_add(vlan);
}
@@ -707,7 +707,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- ether_addr_copy(dev->dev_addr, addr);
+ eth_hw_addr_set(dev, addr);
} else {
/* Rehash and update the device filters */
if (macvlan_addr_busy(vlan->port, addr))
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 694e2f5dbbe5..6b12902a803f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -169,7 +169,7 @@ static int macvtap_device_event(struct notifier_block *unused,
devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
classdev = device_create(&macvtap_class, &dev->dev, devt,
- dev, tap_name);
+ dev, "%s", tap_name);
if (IS_ERR(classdev)) {
tap_free_minor(macvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 2a4892402ed8..86ec5aae4289 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -748,8 +748,7 @@ struct failover *net_failover_create(struct net_device *standby_dev)
failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
failover_dev->features |= failover_dev->hw_features;
- memcpy(failover_dev->dev_addr, standby_dev->dev_addr,
- failover_dev->addr_len);
+ dev_addr_set(failover_dev, standby_dev->dev_addr);
failover_dev->min_mtu = standby_dev->min_mtu;
failover_dev->max_mtu = standby_dev->max_mtu;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 54313bd57797..9661aca35703 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -1470,10 +1470,6 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_devlink_free;
- err = devlink_register(devlink);
- if (err)
- goto err_resources_unregister;
-
err = devlink_params_register(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
if (err)
@@ -1514,9 +1510,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_psample_exit;
- devlink_params_publish(devlink);
- devlink_reload_enable(devlink);
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
err_psample_exit:
@@ -1537,8 +1533,6 @@ err_params_unregister:
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
- devlink_unregister(devlink);
-err_resources_unregister:
devlink_resources_unregister(devlink, NULL);
err_devlink_free:
devlink_free(devlink);
@@ -1572,15 +1566,13 @@ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
- devlink_reload_disable(devlink);
-
+ devlink_unregister(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index b03a0513eb7e..0ab6a40be611 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -81,6 +81,30 @@ static int nsim_set_ringparam(struct net_device *dev,
return 0;
}
+static void
+nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ ch->max_combined = ns->nsim_bus_dev->num_queues;
+ ch->combined_count = ns->ethtool.channels;
+}
+
+static int
+nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ err = netif_set_real_num_queues(dev, ch->combined_count,
+ ch->combined_count);
+ if (err)
+ return err;
+
+ ns->ethtool.channels = ch->combined_count;
+ return 0;
+}
+
static int
nsim_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
{
@@ -118,6 +142,8 @@ static const struct ethtool_ops nsim_ethtool_ops = {
.get_coalesce = nsim_get_coalesce,
.get_ringparam = nsim_get_ringparam,
.set_ringparam = nsim_set_ringparam,
+ .get_channels = nsim_get_channels,
+ .set_channels = nsim_set_channels,
.get_fecparam = nsim_get_fecparam,
.set_fecparam = nsim_set_fecparam,
};
@@ -141,6 +167,8 @@ void nsim_ethtool_init(struct netdevsim *ns)
ns->ethtool.fec.fec = ETHTOOL_FEC_NONE;
ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE;
+ ns->ethtool.channels = ns->nsim_bus_dev->num_queues;
+
ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err);
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 04aebdf85747..aa77af4a68df 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -110,26 +110,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
if (err)
return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array");
- if (err)
- return err;
- for (i = 0; i < 10; i++) {
- err = devlink_fmsg_bool_put(fmsg, true);
- if (err)
- return err;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array");
- if (err)
- return err;
- for (i = 0; i < 10; i++) {
- err = devlink_fmsg_u8_put(fmsg, i);
- if (err)
- return err;
- }
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
@@ -146,18 +126,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
if (err)
return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array");
- if (err)
- return err;
- for (i = 0; i < 10; i++) {
- err = devlink_fmsg_u64_put(fmsg, i);
- if (err)
- return err;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
-
err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects");
if (err)
return err;
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 793c86dc5a9c..d42eec05490f 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -62,6 +62,7 @@ struct nsim_ethtool_pauseparam {
struct nsim_ethtool {
u32 get_err;
u32 set_err;
+ u32 channels;
struct nsim_ethtool_pauseparam pauseparam;
struct ethtool_coalesce coalesce;
struct ethtool_ringparam ring;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index a5bab614ff84..98ca6b18415e 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -428,7 +428,7 @@ static int ntb_netdev_probe(struct device *client_dev)
ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
eth_random_addr(ndev->perm_addr);
- memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+ dev_addr_set(ndev, ndev->perm_addr);
ndev->netdev_ops = &ntb_netdev_ops;
ndev->ethtool_ops = &ntb_ethtool_ops;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index bdac087058b2..69da011e82c8 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -86,15 +86,22 @@
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-#define AT803X_DEBUG_REG_0 0x00
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-#define AT803X_DEBUG_REG_5 0x05
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+
#define AT803X_DEBUG_REG_3C 0x3C
-#define AT803X_DEBUG_REG_3D 0x3D
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
#define AT803X_DEBUG_REG_1F 0x1F
#define AT803X_DEBUG_PLL_ON BIT(2)
@@ -150,8 +157,10 @@
#define ATH8035_PHY_ID 0x004dd072
#define AT8030_PHY_ID_MASK 0xffffffef
-#define QCA8327_PHY_ID 0x004dd034
+#define QCA8327_A_PHY_ID 0x004dd033
+#define QCA8327_B_PHY_ID 0x004dd034
#define QCA8337_PHY_ID 0x004dd036
+#define QCA9561_PHY_ID 0x004dd042
#define QCA8K_PHY_ID_MASK 0xffffffff
#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
@@ -276,25 +285,25 @@ static int at803x_read_page(struct phy_device *phydev)
static int at803x_enable_rx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
AT803X_DEBUG_RX_CLK_DLY_EN);
}
static int at803x_enable_tx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
AT803X_DEBUG_TX_CLK_DLY_EN);
}
static int at803x_disable_rx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
AT803X_DEBUG_RX_CLK_DLY_EN, 0);
}
static int at803x_disable_tx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
AT803X_DEBUG_TX_CLK_DLY_EN, 0);
}
@@ -1236,7 +1245,8 @@ static int at803x_cable_test_get_status(struct phy_device *phydev,
int pair, ret;
if (phydev->phy_id == ATH9331_PHY_ID ||
- phydev->phy_id == ATH8032_PHY_ID)
+ phydev->phy_id == ATH8032_PHY_ID ||
+ phydev->phy_id == QCA9561_PHY_ID)
pair_mask = 0x3;
else
pair_mask = 0xf;
@@ -1276,7 +1286,8 @@ static int at803x_cable_test_start(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
if (phydev->phy_id != ATH9331_PHY_ID &&
- phydev->phy_id != ATH8032_PHY_ID)
+ phydev->phy_id != ATH8032_PHY_ID &&
+ phydev->phy_id != QCA9561_PHY_ID)
phy_write(phydev, MII_CTRL1000, 0);
/* we do all the (time consuming) work later */
@@ -1292,9 +1303,9 @@ static int qca83xx_config_init(struct phy_device *phydev)
switch (switch_revision) {
case 1:
/* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
/* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
break;
case 2:
@@ -1302,12 +1313,95 @@ static int qca83xx_config_init(struct phy_device *phydev)
fallthrough;
case 4:
phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
break;
}
+ /* QCA8327 requires a DAC amplitude adjustment of +6% for 100M links.
+ * Disable it on init and enable it only at 100M speed, following the
+ * original QCA source code.
+ */
+ if (phydev->drv->phy_id == QCA8327_A_PHY_ID ||
+ phydev->drv->phy_id == QCA8327_B_PHY_ID)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ /* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+ /* QCA8337 doesn't require DAC amplitude adjustment */
+ if (phydev->drv->phy_id == QCA8337_PHY_ID)
+ return;
+
+ /* Set the DAC amplitude adjustment to +6% for 100M while the link is running */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+ /* Reinit the port to undo the values set by suspend */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+ /* On resume from suspend the switch executes a reset and
+ * restarts auto-negotiation. Wait for the reset to complete.
+ */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ msleep(1);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* Only QCA8337 supports actual suspend.
+ * QCA8327 causes port unreliability when PHY suspend
+ * is set.
+ */
+ if (phydev->drv->phy_id == QCA8337_PHY_ID) {
+ genphy_suspend(phydev);
+ } else {
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+ }
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
return 0;
}
@@ -1408,18 +1502,68 @@ static struct phy_driver at803x_driver[] = {
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
}, {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, {
/* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "QCA PHY 8337",
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8337 internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = at803x_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = at803x_get_sset_count,
+ .get_strings = at803x_get_strings,
+ .get_stats = at803x_get_stats,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-A from switch QCA8327-AL1A */
+ .phy_id = QCA8327_A_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-A internal PHY",
/* PHY_GBIT_FEATURES */
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = at803x_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = at803x_get_sset_count,
+ .get_strings = at803x_get_strings,
+ .get_stats = at803x_get_stats,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-B from switch QCA8327-BL1A */
+ .phy_id = QCA8327_B_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-B internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = at803x_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = at803x_get_sset_count,
+ .get_strings = at803x_get_strings,
+ .get_stats = at803x_get_stats,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
}, };
module_phy_driver(at803x_driver);
@@ -1430,6 +1574,10 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 27b6a3f507ae..6ceadd2a0082 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -415,6 +415,190 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
return bcm7xxx_28nm_ephy_apd_enable(phydev);
}
+static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
+{
+ int tmp, rcalcode, rcalnewcodelp, rcalnewcode11, rcalnewcode11d2;
+
+ /* Reset PHY */
+ tmp = genphy_soft_reset(phydev);
+ if (tmp)
+ return tmp;
+
+ /* Reset AFE and PLL */
+ bcm_phy_write_exp_sel(phydev, 0x0003, 0x0006);
+ /* Clear reset */
+ bcm_phy_write_exp_sel(phydev, 0x0003, 0x0000);
+
+ /* Write PLL/AFE control register to select 54MHz crystal */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0000);
+ bcm_phy_write_misc(phydev, 0x0031, 0x0000, 0x044a);
+
+ /* Change Ka,Kp,Ki to pdiv=1 */
+ bcm_phy_write_misc(phydev, 0x0033, 0x0002, 0x71a1);
+ /* Configuration override */
+ bcm_phy_write_misc(phydev, 0x0033, 0x0001, 0x8000);
+
+ /* Change PLL_NDIV and PLL_NUDGE */
+ bcm_phy_write_misc(phydev, 0x0031, 0x0001, 0x2f68);
+ bcm_phy_write_misc(phydev, 0x0031, 0x0002, 0x0000);
+
+ /* Reference frequency is 54MHz, config_mode[15:14] = 3 (low
+ * phase)
+ */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0003, 0xc036);
+
+ /* Initialize bypass mode */
+ bcm_phy_write_misc(phydev, 0x0032, 0x0003, 0x0000);
+ /* Bypass code, default: VCOCLK enabled */
+ bcm_phy_write_misc(phydev, 0x0033, 0x0000, 0x0002);
+ /* LDOs at default setting */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0002, 0x01c0);
+ /* Release PLL reset */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0001);
+
+ /* Bandgap curvature correction to correct default */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0000, 0x0010);
+
+ /* Run RCAL */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x0038);
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003b);
+ udelay(2);
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003f);
+ mdelay(5);
+
+ /* AFE_CAL_CONFIG_0, Vref=1000, Target=10, averaging enabled */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x1c82);
+ /* AFE_CAL_CONFIG_0, no reset and analog powerup */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e82);
+ udelay(2);
+ /* AFE_CAL_CONFIG_0, start calibration */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f82);
+ udelay(100);
+ /* AFE_CAL_CONFIG_0, clear start calibration, set HiBW */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e86);
+ udelay(2);
+ /* AFE_CAL_CONFIG_0, start calibration with hi BW mode set */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f86);
+ udelay(100);
+
+ /* Adjust 10BT amplitude additional +7% and 100BT +2% */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7ea);
+ /* Adjust 1G mode amplitude and 1G testmode1 */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
+
+ /* Read CORE_EXPA9 */
+ tmp = bcm_phy_read_exp(phydev, 0x00a9);
+ /* CORE_EXPA9[6:1] is rcalcode[5:0] */
+ rcalcode = (tmp & 0x7e) / 2;
+ /* Correct RCAL code + 1 is -1% rprogr, LP: +16 */
+ rcalnewcodelp = rcalcode + 16;
+ /* Correct RCAL code + 1 is -15 rprogr, 11: +10 */
+ rcalnewcode11 = rcalcode + 10;
+ /* Saturate if necessary */
+ if (rcalnewcodelp > 0x3f)
+ rcalnewcodelp = 0x3f;
+ if (rcalnewcode11 > 0x3f)
+ rcalnewcode11 = 0x3f;
+ /* REXT=1 BYP=1 RCAL_st1<5:0>=new rcal code */
+ tmp = 0x00f8 + rcalnewcodelp * 256;
+ /* Program into AFE_CAL_CONFIG_2 */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, tmp);
+ /* AFE_BIAS_CONFIG_0 10BT bias code (Bias: E4) */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7e4);
+ /* Invert ADC clock output and ADC refp LDO current to correct
+ * default
+ */
+ bcm_phy_write_misc(phydev, 0x003b, 0x0000, 0x8002);
+ /* 100BT stair case, high BW, 1G stair case, alternate encode */
+ bcm_phy_write_misc(phydev, 0x003c, 0x0003, 0xf882);
+ /* 1000BT DAC transition method per Erol, bits[32], DAC Shuffle
+ * sequence 1 + 10BT imp adjust bits
+ */
+ bcm_phy_write_misc(phydev, 0x003d, 0x0000, 0x3201);
+ /* Non-overlap fix */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0002, 0x0c00);
+
+ /* pwdb override (rxconfig<5>) to turn on the RX LDO independent of
+ * pwdb controls from DSP_TAP10
+ */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0020);
+
+ /* Remove references to channel 2 and 3 */
+ bcm_phy_write_misc(phydev, 0x003b, 0x0002, 0x0000);
+ bcm_phy_write_misc(phydev, 0x003b, 0x0003, 0x0000);
+
+ /* Set cal_bypassb bit rxconfig<43> */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0003, 0x0800);
+ udelay(2);
+
+ /* Revert pwdb_override (rxconfig<5>) to 0 so that the RX power
+ * is controlled by the DSP.
+ */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0000);
+
+ /* Drop LSB */
+ rcalnewcode11d2 = (rcalnewcode11 & 0xfffe) / 2;
+ tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0001);
+ /* Clear bits [11:5] */
+ tmp &= ~0xfe0;
+ /* set txcfg_ch0<5>=1 (enable + set local rcal) */
+ tmp |= 0x0020 | (rcalnewcode11d2 * 64);
+ bcm_phy_write_misc(phydev, 0x003d, 0x0001, tmp);
+ bcm_phy_write_misc(phydev, 0x003d, 0x0002, tmp);
+
+ tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0000);
+ /* set txcfg<45:44>=11 (enable Rextra + invert fullscaledetect)
+ */
+ tmp &= ~0x3000;
+ tmp |= 0x3000;
+ bcm_phy_write_misc(phydev, 0x003d, 0x0000, tmp);
+
+ return 0;
+}
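
To make the RCAL arithmetic above concrete: if CORE_EXPA9 reads 0x2a, then rcalcode = (0x2a & 0x7e) / 2 = 21, rcalnewcodelp = 21 + 16 = 37 (0x25, below the 0x3f saturation limit), and AFE_CAL_CONFIG_2 is programmed with 0x00f8 + 37 * 256 = 0x25f8. Likewise rcalnewcode11 = 21 + 10 = 31; dropping the LSB and halving gives rcalnewcode11d2 = 15, which the * 64 multiply places into the [11:6] field of the txcfg value.
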
+
+static int bcm7xxx_16nm_ephy_config_init(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = bcm7xxx_16nm_ephy_afe_config(phydev);
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_set_eee(phydev, true);
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+
+ /* Auto power down of DLL enabled,
+ * TXC/RXC disabled during auto power down.
+ */
+ val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
+ val |= BIT(8);
+
+ ret = bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
+ if (ret < 0)
+ return ret;
+
+ return bcm_phy_enable_apd(phydev, true);
+}
+
+static int bcm7xxx_16nm_ephy_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds when coming out of suspend/resume */
+ ret = bcm7xxx_16nm_ephy_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
#define MII_BCM7XXX_REG_INVALID 0xff
static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
@@ -716,9 +900,25 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
.resume = bcm7xxx_config_init, \
}
+#define BCM7XXX_16NM_EPHY(_oui, _name) \
+{ \
+ .phy_id = (_oui), \
+ .phy_id_mask = 0xfffffff0, \
+ .name = _name, \
+ /* PHY_BASIC_FEATURES */ \
+ .flags = PHY_IS_INTERNAL, \
+ .probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
+ .config_init = bcm7xxx_16nm_ephy_config_init, \
+ .config_aneg = genphy_config_aneg, \
+ .read_status = genphy_read_status, \
+ .resume = bcm7xxx_16nm_ephy_resume, \
+}
+
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM72116, "Broadcom BCM72116"),
+ BCM7XXX_16NM_EPHY(PHY_ID_BCM72165, "Broadcom BCM72165"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -741,6 +941,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM72116, 0xfffffff0, },
+ { PHY_ID_BCM72165, 0xfffffff0, },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 83aea5c5cd03..bb5104ae4610 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -392,10 +392,50 @@ static int bcm54xx_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm54xx_iddq_set(struct phy_device *phydev, bool enable)
+{
+ int ret = 0;
+
+ if (!(phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND))
+ return ret;
+
+ ret = bcm_phy_read_exp(phydev, BCM54XX_TOP_MISC_IDDQ_CTRL);
+ if (ret < 0)
+ goto out;
+
+ if (enable)
+ ret |= BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP;
+ else
+ ret &= ~(BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP);
+
+ ret = bcm_phy_write_exp(phydev, BCM54XX_TOP_MISC_IDDQ_CTRL, ret);
+out:
+ return ret;
+}
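
The IDDQ path above is strictly opt-in: nothing happens unless the attaching MAC driver sets PHY_BRCM_IDDQ_SUSPEND in dev_flags. A minimal sketch of that opt-in (hypothetical MAC-side code, not part of this patch; ndev and link_handler are assumed names):

    /* request IDDQ low-power suspend before attaching, so the
     * bcm54xx_suspend()/bcm54xx_resume() pair takes the IDDQ path
     */
    phydev->dev_flags |= PHY_BRCM_IDDQ_SUSPEND;
    err = phy_connect_direct(ndev, phydev, link_handler,
                             PHY_INTERFACE_MODE_RGMII);
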
+
+static int bcm54xx_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ /* We cannot use a read/modify/write here, otherwise the PHY gets into
+ * a bad state where its LEDs keep flashing, thus defeating the purpose
+ * of low power mode.
+ */
+ ret = phy_write(phydev, MII_BMCR, BMCR_PDOWN);
+ if (ret < 0)
+ return ret;
+
+ return bcm54xx_iddq_set(phydev, true);
+}
+
static int bcm54xx_resume(struct phy_device *phydev)
{
int ret;
+ ret = bcm54xx_iddq_set(phydev, false);
+ if (ret < 0)
+ return ret;
+
 /* Writes to registers other than BMCR would be ignored
* unless we clear the PDOWN bit first
*/
@@ -408,6 +448,15 @@ static int bcm54xx_resume(struct phy_device *phydev)
*/
fsleep(40);
+ /* Issue a soft reset after clearing the power down bit
+ * and before doing any other configuration.
+ */
+ if (phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND) {
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
return bcm54xx_config_init(phydev);
}
@@ -702,6 +751,36 @@ static void bcm54xx_get_stats(struct phy_device *phydev,
bcm_phy_get_stats(phydev, priv->stats, stats, data);
}
+static void bcm54xx_link_change_notify(struct phy_device *phydev)
+{
+ u16 mask = MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE |
+ MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE;
+ int ret;
+
+ if (phydev->state != PHY_RUNNING)
+ return;
+
+ /* Don't change the DAC wake settings if auto power down
+ * is not requested.
+ */
+ if (!(phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+ return;
+
+ ret = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP08);
+ if (ret < 0)
+ return;
+
+ /* Enable/disable 10BaseT auto and forced early DAC wake depending
+ * on the negotiated speed; these settings should only be applied
+ * at 10Mbit/s.
+ */
+ if (phydev->speed == SPEED_10)
+ ret |= mask;
+ else
+ ret &= ~mask;
+ bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret);
+}
+
static struct phy_driver broadcom_drivers[] = {
{
.phy_id = PHY_ID_BCM5411,
@@ -715,6 +794,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
@@ -727,6 +807,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54210E,
.phy_id_mask = 0xfffffff0,
@@ -739,6 +820,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+ .suspend = bcm54xx_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
@@ -751,6 +835,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54612E,
.phy_id_mask = 0xfffffff0,
@@ -763,6 +848,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
@@ -774,6 +860,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.read_status = bcm54616s_read_status,
.probe = bcm54616s_probe,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
@@ -788,6 +875,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
@@ -801,6 +889,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54810,
.phy_id_mask = 0xfffffff0,
@@ -814,8 +903,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
- .suspend = genphy_suspend,
+ .suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54811,
.phy_id_mask = 0xfffffff0,
@@ -829,8 +919,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
- .suspend = genphy_suspend,
+ .suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
@@ -843,6 +934,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
@@ -855,6 +947,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+ .suspend = bcm54xx_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
@@ -867,6 +962,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+ .suspend = bcm54xx_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
@@ -879,6 +977,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
@@ -905,6 +1004,7 @@ static struct phy_driver broadcom_drivers[] = {
.get_strings = bcm_phy_get_strings,
.get_stats = bcm54xx_get_stats,
.probe = bcm54xx_phy_probe,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM53125,
.phy_id_mask = 0xfffffff0,
@@ -918,6 +1018,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
@@ -930,6 +1031,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
} };
module_phy_driver(broadcom_drivers);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 6bbc81ad295f..914619f3f0e3 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -619,6 +619,25 @@ static int dp83867_of_init(struct phy_device *phydev)
#else
static int dp83867_of_init(struct phy_device *phydev)
{
+ struct dp83867_private *dp83867 = phydev->priv;
+ u16 delay;
+
+ /* For a non-OF device, the RX and TX ID values are either strapped
+ * or taken from the default value. So, initialize the RX and TX ID
+ * values here so that RGMIIDCTL is configured correctly later in
+ * dp83867_config_init().
+ */
+ delay = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL);
+ dp83867->rx_id_delay = delay & DP83867_RGMII_RX_CLK_DELAY_MAX;
+ dp83867->tx_id_delay = (delay >> DP83867_RGMII_TX_CLK_DELAY_SHIFT) &
+ DP83867_RGMII_TX_CLK_DELAY_MAX;
+
+ /* Per the datasheet, the IO impedance defaults to 50 ohms, so set the
+ * same here; otherwise the default '0' would mean the highest IO
+ * impedance, which is wrong.
+ */
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN / 2;
+
return 0;
}
#endif /* CONFIG_OF_MDIO */
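
As a worked example of the bitfield split above, assuming the usual 4-bit delay codes (RX in bits [3:0] of RGMIIDCTL, TX in bits [7:4]): a readback of 0xa7 yields rx_id_delay = 0x7 and tx_id_delay = 0xa.
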
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index bd310e8d5e43..b6fea119fe13 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -22,6 +22,7 @@
* If both the fiber and copper ports are connected, the first to gain
* link takes priority and the other port is completely locked out.
*/
+#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@@ -33,6 +34,8 @@
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
+#define MV_VERSION(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d))
+
enum {
MV_PMA_FW_VER0 = 0xc011,
MV_PMA_FW_VER1 = 0xc012,
@@ -62,6 +65,15 @@ enum {
MV_PCS_CSCR1_MDIX_MDIX = 0x0020,
MV_PCS_CSCR1_MDIX_AUTO = 0x0060,
+ MV_PCS_DSC1 = 0x8003,
+ MV_PCS_DSC1_ENABLE = BIT(9),
+ MV_PCS_DSC1_10GBT = 0x01c0,
+ MV_PCS_DSC1_1GBR = 0x0038,
+ MV_PCS_DSC1_100BTX = 0x0007,
+ MV_PCS_DSC2 = 0x8004,
+ MV_PCS_DSC2_2P5G = 0xf000,
+ MV_PCS_DSC2_5G = 0x0f00,
+
MV_PCS_CSSR1 = 0x8008,
MV_PCS_CSSR1_SPD1_MASK = 0xc000,
MV_PCS_CSSR1_SPD1_SPD2 = 0xc000,
@@ -125,6 +137,7 @@ enum {
};
struct mv3310_chip {
+ bool (*has_downshift)(struct phy_device *phydev);
void (*init_supported_interfaces)(unsigned long *mask);
int (*get_mactype)(struct phy_device *phydev);
int (*init_interface)(struct phy_device *phydev, int mactype);
@@ -138,6 +151,7 @@ struct mv3310_priv {
DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX);
u32 firmware_ver;
+ bool has_downshift;
bool rate_match;
phy_interface_t const_interface;
@@ -330,6 +344,71 @@ static int mv3310_reset(struct phy_device *phydev, u32 unit)
5000, 100000, true);
}
+static int mv3310_get_downshift(struct phy_device *phydev, u8 *ds)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ int val;
+
+ if (!priv->has_downshift)
+ return -EOPNOTSUPP;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1);
+ if (val < 0)
+ return val;
+
+ if (val & MV_PCS_DSC1_ENABLE)
+ /* assume that all fields are the same */
+ *ds = 1 + FIELD_GET(MV_PCS_DSC1_10GBT, (u16)val);
+ else
+ *ds = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int mv3310_set_downshift(struct phy_device *phydev, u8 ds)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ u16 val;
+ int err;
+
+ if (!priv->has_downshift)
+ return -EOPNOTSUPP;
+
+ if (ds == DOWNSHIFT_DEV_DISABLE)
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1,
+ MV_PCS_DSC1_ENABLE);
+
+ /* DOWNSHIFT_DEV_DEFAULT_COUNT is confusing. It looks like it should
+ * set the default settings for the PHY. However, it is used for
+ * "ethtool --set-phy-tunable ethN downshift on". The intention is
+ * to enable downshift at a default number of retries. The default
+ * settings for 88x3310 are for two retries with downshift disabled.
+ * So let's use two retries with downshift enabled.
+ */
+ if (ds == DOWNSHIFT_DEV_DEFAULT_COUNT)
+ ds = 2;
+
+ if (ds > 8)
+ return -E2BIG;
+
+ ds -= 1;
+ val = FIELD_PREP(MV_PCS_DSC2_2P5G, ds);
+ val |= FIELD_PREP(MV_PCS_DSC2_5G, ds);
+ err = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC2,
+ MV_PCS_DSC2_2P5G | MV_PCS_DSC2_5G, val);
+ if (err < 0)
+ return err;
+
+ val = MV_PCS_DSC1_ENABLE;
+ val |= FIELD_PREP(MV_PCS_DSC1_10GBT, ds);
+ val |= FIELD_PREP(MV_PCS_DSC1_1GBR, ds);
+ val |= FIELD_PREP(MV_PCS_DSC1_100BTX, ds);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1,
+ MV_PCS_DSC1_ENABLE | MV_PCS_DSC1_10GBT |
+ MV_PCS_DSC1_1GBR | MV_PCS_DSC1_100BTX, val);
+}
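
Every per-speed field is written with the same count-minus-one value, so encode and decode are symmetric. A small sketch of the round-trip under that assumption (helper names hypothetical; masks as defined earlier in this file):

    /* ds is the user-visible retry count (1..8); hardware stores ds - 1 */
    static u16 dsc1_encode(u8 ds)
    {
        u16 v = MV_PCS_DSC1_ENABLE;

        v |= FIELD_PREP(MV_PCS_DSC1_10GBT, ds - 1);
        v |= FIELD_PREP(MV_PCS_DSC1_1GBR, ds - 1);
        v |= FIELD_PREP(MV_PCS_DSC1_100BTX, ds - 1);
        return v;
    }

    static u8 dsc1_decode(u16 v)
    {
        /* all per-speed fields hold the same value; read any one */
        return 1 + FIELD_GET(MV_PCS_DSC1_10GBT, v);
    }
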
+
static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd)
{
int val;
@@ -448,6 +527,9 @@ static int mv3310_probe(struct phy_device *phydev)
priv->firmware_ver >> 24, (priv->firmware_ver >> 16) & 255,
(priv->firmware_ver >> 8) & 255, priv->firmware_ver & 255);
+ if (chip->has_downshift)
+ priv->has_downshift = chip->has_downshift(phydev);
+
/* Powering down the port when not in use saves about 600mW */
ret = mv3310_power_down(phydev);
if (ret)
@@ -616,7 +698,16 @@ static int mv3310_config_init(struct phy_device *phydev)
}
/* Enable EDPD mode - saving 600mW */
- return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
+ err = mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
+ if (err)
+ return err;
+
+ /* Allow downshift */
+ err = mv3310_set_downshift(phydev, DOWNSHIFT_DEV_DEFAULT_COUNT);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ return 0;
}
static int mv3310_get_features(struct phy_device *phydev)
@@ -886,6 +977,8 @@ static int mv3310_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return mv3310_get_downshift(phydev, data);
case ETHTOOL_PHY_EDPD:
return mv3310_get_edpd(phydev, data);
default:
@@ -897,6 +990,8 @@ static int mv3310_set_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, const void *data)
{
switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return mv3310_set_downshift(phydev, *(u8 *)data);
case ETHTOOL_PHY_EDPD:
return mv3310_set_edpd(phydev, *(u16 *)data);
default:
@@ -904,6 +999,14 @@ static int mv3310_set_tunable(struct phy_device *phydev,
}
}
+static bool mv3310_has_downshift(struct phy_device *phydev)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+
+ /* Fails to downshift with firmware older than v0.3.5.0 */
+ return priv->firmware_ver >= MV_VERSION(0,3,5,0);
+}
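
For reference, MV_VERSION(0,3,5,0) packs to 0x00030500, matching the a.b.c.d byte layout of priv->firmware_ver that mv3310_probe() prints.
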
+
static void mv3310_init_supported_interfaces(unsigned long *mask)
{
__set_bit(PHY_INTERFACE_MODE_SGMII, mask);
@@ -943,6 +1046,7 @@ static void mv2111_init_supported_interfaces(unsigned long *mask)
}
static const struct mv3310_chip mv3310_type = {
+ .has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3310_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
.init_interface = mv3310_init_interface,
@@ -953,6 +1057,7 @@ static const struct mv3310_chip mv3310_type = {
};
static const struct mv3310_chip mv3340_type = {
+ .has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3340_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
.init_interface = mv3340_init_interface,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6865d9319197..c204067f1890 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -937,6 +937,28 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
EXPORT_SYMBOL_GPL(mdiobus_modify);
/**
+ * mdiobus_modify_changed - Convenience function for modifying a given mdio
+ * device register and returning whether it changed
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
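
The return convention (negative errno on failure, 0 when the register already held the value, 1 when it was rewritten) lets callers skip follow-up work when nothing changed. A minimal sketch of a hypothetical caller, mirroring how the phylink PCS code below consumes the result:

    static int example_update_adv(struct mii_bus *bus, int addr, u16 adv)
    {
        int ret;

        ret = mdiobus_modify_changed(bus, addr, MII_ADVERTISE, 0xffff, adv);
        if (ret < 0)
            return ret;          /* MDIO access failed */

        if (ret > 0)             /* advertisement changed: restart autoneg */
            ret = mdiobus_modify(bus, addr, MII_BMCR,
                                 BMCR_ANRESTART, BMCR_ANRESTART);

        return ret;
    }
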
+
+/**
* mdio_bus_match - determine if given MDIO driver supports the given
* MDIO device
* @dev: target MDIO device
@@ -949,8 +971,14 @@ EXPORT_SYMBOL_GPL(mdiobus_modify);
*/
static int mdio_bus_match(struct device *dev, struct device_driver *drv)
{
+ struct mdio_driver *mdiodrv = to_mdio_driver(drv);
struct mdio_device *mdio = to_mdio_device(dev);
+ /* Both the driver and the device must type-match: the !()/!()
+ * comparison acts as a logical XOR, rejecting the match when
+ * exactly one side is a PHY.
+ */
+ if (!(mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY) !=
+ !(mdio->flags & MDIO_DEVICE_FLAG_PHY))
+ return 0;
+
if (of_driver_match_device(dev, drv))
return 1;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5c928f827173..44a24b99c894 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -863,9 +863,9 @@ static int ksz9031_config_init(struct phy_device *phydev)
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4, &update);
- if (update && phydev->interface != PHY_INTERFACE_MODE_RGMII)
+ if (update && !phy_interface_is_rgmii(phydev))
phydev_warn(phydev,
- "*-skew-ps values should be used only with phy-mode = \"rgmii\"\n");
+ "*-skew-ps values should be used only with RGMII PHY modes\n");
/* Silicon Errata Sheet (DS80000691D or DS80000692D):
* When the device links in the 1000BASE-T slave mode only,
@@ -1003,6 +1003,26 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
txcdll_val);
}
+/* Silicon Errata DS80000693B
+ *
+ * When LEDs are configured in Individual Mode, LED1 is ON in a no-link
+ * condition. The workaround is to set register 0x1e, bit 9; this way LED1 behaves
+ * according to the datasheet (off if there is no link).
+ */
+static int ksz9131_led_errata(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, 2, 0);
+ if (reg < 0)
+ return reg;
+
+ if (!(reg & BIT(4)))
+ return 0;
+
+ return phy_set_bits(phydev, 0x1e, BIT(9));
+}
+
static int ksz9131_config_init(struct phy_device *phydev)
{
struct device_node *of_node;
@@ -1058,6 +1078,10 @@ static int ksz9131_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = ksz9131_led_errata(phydev);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -1537,6 +1561,65 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
return ret;
}
+#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16
+#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
+#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
+
+#define LAN8804_ALIGN_SWAP 0x4a
+#define LAN8804_ALIGN_TX_A_B_SWAP 0x1
+#define LAN8804_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0)
+#define LAN8814_CLOCK_MANAGEMENT 0xd
+#define LAN8814_LINK_QUALITY 0x8e
+
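+/* Extended page access on these PHYs is a three-step sequence: select the
+ * page via register 0x16, write the in-page address to 0x17, then re-select
+ * the page with the EP_FUNC bit set so that 0x17 becomes a data window for
+ * the subsequent read or write.
+ */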
+static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
+{
+ u32 data;
+
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+
+ return data;
+}
+
+static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
+ u16 val)
+{
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+
+ val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
+ if (val) {
+ phydev_err(phydev, "Error: phy_write has returned error %d\n",
+ val);
+ return val;
+ }
+ return 0;
+}
+
+static int lan8804_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ /* MDI-X setting for swap A,B transmit */
+ val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP);
+ val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK;
+ val |= LAN8804_ALIGN_TX_A_B_SWAP;
+ lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val);
+
+ /* Make sure that the PHY will not stop generating the clock when the
+ * link partner goes down
+ */
+ lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e);
+ lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY);
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1593,8 +1676,9 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ /* No suspend/resume callbacks because of errata DS80000700A:
+ * a receiver error can follow software power down.
+ */
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1719,6 +1803,20 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_LAN8804,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip LAN966X Gigabit PHY",
+ .config_init = lan8804_config_init,
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
+ .read_status = ksz9031_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
@@ -1794,6 +1892,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 6e32da28e138..ebfeeb3c67c1 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -273,12 +273,12 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
static int vsc85xx_wol_set(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
+ const u8 *mac_addr = phydev->attached_dev->dev_addr;
int rc;
u16 reg_val;
u8 i;
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
- u8 *mac_addr = phydev->attached_dev->dev_addr;
mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4f9990b47a37..74d8e1dc125f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3149,6 +3149,16 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
return -EINVAL;
}
+ /* PHYLIB device drivers must not match using a DT compatible table
+ * as this bypasses our checks that the mdiodev that is being matched
+ * is backed by a struct phy_device. If such a case happens, we would
+ * make out-of-bounds accesses and lock up on phydev->lock.
+ */
+ if (WARN(new_driver->mdiodrv.driver.of_match_table,
+ "%s: driver must not provide a DT match table\n",
+ new_driver->name))
+ return -EINVAL;
+
new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
new_driver->mdiodrv.driver.name = new_driver->name;
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 0a0abe8e4be0..7e93d81fa5ad 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -132,6 +132,17 @@ void phylink_set_port_modes(unsigned long *mask)
}
EXPORT_SYMBOL_GPL(phylink_set_port_modes);
+void phylink_set_10g_modes(unsigned long *mask)
+{
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+}
+EXPORT_SYMBOL_GPL(phylink_set_10g_modes);
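
A typical consumer is a MAC driver's validate step that wants to allow all 10G media variants in one call. A minimal sketch (hypothetical driver code; "supported" is the caller's linkmode mask):

    __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

    phylink_set(mask, Autoneg);
    phylink_set(mask, Pause);
    phylink_set_port_modes(mask);   /* TP/FIBRE/... port bits */
    phylink_set_10g_modes(mask);    /* 10000base{T,CR,SR,LR,LRM,ER}_Full */
    linkmode_and(supported, supported, mask);
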
+
static int phylink_is_empty_linkmode(const unsigned long *linkmode)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, };
@@ -540,9 +551,15 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
- state->speed = SPEED_UNKNOWN;
- state->duplex = DUPLEX_UNKNOWN;
- state->pause = MLO_PAUSE_NONE;
+ if (state->an_enabled) {
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = MLO_PAUSE_NONE;
+ } else {
+ state->speed = pl->link_config.speed;
+ state->duplex = pl->link_config.duplex;
+ state->pause = pl->link_config.pause;
+ }
state->an_complete = 0;
state->link = 1;
@@ -1333,7 +1350,10 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
* but one would hope all packets have been sent. This
* also means phylink_resolve() will do nothing.
*/
- netif_carrier_off(pl->netdev);
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
+ else
+ pl->old_link_state = false;
/* We do not call mac_link_down() here as we want the
* link to remain up to receive the WoL packets.
@@ -1598,20 +1618,11 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
config.an_enabled);
- /* Validate without changing the current supported mask. */
- linkmode_copy(support, pl->supported);
- if (phylink_validate(pl, support, &config))
- return -EINVAL;
-
- /* If autonegotiation is enabled, we must have an advertisement */
- if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
- return -EINVAL;
-
/* If this link is with an SFP, ensure that changes to advertised modes
* also cause the associated interface to be selected such that the
* link can be configured correctly.
*/
- if (pl->sfp_port && pl->sfp_bus) {
+ if (pl->sfp_bus) {
config.interface = sfp_select_interface(pl->sfp_bus,
config.advertising);
if (config.interface == PHY_INTERFACE_MODE_NA) {
@@ -1631,8 +1642,17 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
}
+ } else {
+ /* Validate without changing the current supported mask. */
+ linkmode_copy(support, pl->supported);
+ if (phylink_validate(pl, support, &config))
+ return -EINVAL;
}
+ /* If autonegotiation is enabled, we must have an advertisement */
+ if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
+ return -EINVAL;
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -2535,7 +2555,10 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- if (!state->link)
+ /* If there is no link or autonegotiation is disabled, the LP advertisement
+ * data is not meaningful, so don't go any further.
+ */
+ if (!state->link || !state->an_enabled)
return;
switch (state->interface) {
@@ -2582,7 +2605,6 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
{
struct mii_bus *bus = pcs->bus;
int addr = pcs->addr;
- int val, ret;
u16 adv;
switch (interface) {
@@ -2596,32 +2618,12 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
advertising))
adv |= ADVERTISE_1000XPSE_ASYM;
- val = mdiobus_read(bus, addr, MII_ADVERTISE);
- if (val < 0)
- return val;
-
- if (val == adv)
- return 0;
-
- ret = mdiobus_write(bus, addr, MII_ADVERTISE, adv);
- if (ret < 0)
- return ret;
-
- return 1;
+ return mdiobus_modify_changed(bus, addr, MII_ADVERTISE,
+ 0xffff, adv);
case PHY_INTERFACE_MODE_SGMII:
- val = mdiobus_read(bus, addr, MII_ADVERTISE);
- if (val < 0)
- return val;
-
- if (val == 0x0001)
- return 0;
-
- ret = mdiobus_write(bus, addr, MII_ADVERTISE, 0x0001);
- if (ret < 0)
- return ret;
-
- return 1;
+ return mdiobus_modify_changed(bus, addr, MII_ADVERTISE,
+ 0xffff, 0x0001);
default:
/* Nothing to do for other modes */
@@ -2658,7 +2660,12 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
changed = ret > 0;
/* Ensure ISOLATE bit is disabled */
- bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0;
+ if (mode == MLO_AN_INBAND &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising))
+ bmcr = BMCR_ANENABLE;
+ else
+ bmcr = 0;
+
ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR,
BMCR_ANENABLE | BMCR_ISOLATE, bmcr);
if (ret < 0)
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 11be60333fa8..a5671ab896b3 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -1023,6 +1023,14 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc942),
+ .name = "RTL8365MB-VC Gigabit Ethernet",
+ /* Interrupt handling analogous to RTL8366RB */
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
},
};
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fb52cd175b45..1180a0e2445f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1161,7 +1161,7 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set) {
while (1) {
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
- if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+ if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name))
break;
unit_put(&pn->units_idr, ret);
ret = unit_get(&pn->units_idr, ppp, ret + 1);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index dd7917cab2b1..8b2adc56b92a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1790,7 +1790,7 @@ static int team_set_mac_address(struct net_device *dev, void *p)
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev_addr_set(dev, addr->sa_data);
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
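
These memcpy()-to-helper conversions, here and in the USB drivers below, exist because netdev->dev_addr is being made const: every write now has to go through the core so it can track address changes. Conceptually the two helpers relate as in this simplified sketch (the real eth_hw_addr_set() in etherdevice.h passes ETH_ALEN explicitly):

    static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
    {
        dev_addr_set(dev, addr);    /* copies dev->addr_len == ETH_ALEN bytes */
    }
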
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 73b97f4cc1ec..ea06d10e1c21 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -119,7 +119,7 @@ static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
}
static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
+ u16 index, u16 size, const void *data)
{
int ret;
@@ -714,7 +714,7 @@ static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto out;
- ether_addr_copy(dev->net->dev_addr, dev->net->perm_addr);
+ eth_hw_addr_set(dev->net, dev->net->perm_addr);
/* Set Rx urb size */
dev->rx_urb_size = URB_SIZE;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 38cda590895c..42ba4af68090 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -791,7 +791,7 @@ int asix_set_mac_address(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* We use the 20 byte dev->data
* for our 6 byte mac buffer
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 30821f6a6d7a..4514d35ef4c4 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -59,7 +59,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
{
if (is_valid_ether_addr(addr)) {
- memcpy(dev->net->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev->net, addr);
} else {
netdev_info(dev->net, "invalid hw address, using random\n");
eth_hw_addr_random(dev->net);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index d9777d9a7c5d..3777c7e2e6fc 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -176,7 +176,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = -EIO;
goto free;
}
- memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+ eth_hw_addr_set(dev->net, buf);
dev->net->netdev_ops = &ax88172a_netdev_ops;
dev->net->ethtool_ops = &ax88172a_ethtool_ops;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index f25448a08870..ea8aa8c33241 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -209,7 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+ u16 size, const void *data, int in_pm)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
@@ -272,7 +272,7 @@ static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
}
static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
+ u16 index, u16 size, const void *data)
{
int ret;
@@ -313,7 +313,7 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
+ u16 size, const void *data)
{
int ret;
@@ -463,7 +463,7 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
u16 tmp16;
u8 tmp8;
int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
- int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
+ int (*fnw)(struct usbnet *, u8, u16, u16, u16, const void *);
if (!in_pm) {
fnr = ax88179_read_cmd;
@@ -1015,7 +1015,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* Set the MAC address */
ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
@@ -1310,7 +1310,7 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
}
if (is_valid_ether_addr(mac)) {
- memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(dev->net, mac);
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
eth_hw_addr_random(dev->net);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 97ba67042d12..24db5768a3c0 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -615,7 +615,7 @@ static void catc_stats_timer(struct timer_list *t)
* Receive modes. Broadcast, Multicast, Promisc.
*/
-static void catc_multicast(unsigned char *addr, u8 *multicast)
+static void catc_multicast(const unsigned char *addr, u8 *multicast)
{
u32 crc;
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index e1da9102a540..ad5121e9cf5d 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -275,6 +275,8 @@ static const struct net_device_ops usbpn_ops = {
static void usbpn_setup(struct net_device *dev)
{
+ const u8 addr = PN_MEDIA_USB;
+
dev->features = 0;
dev->netdev_ops = &usbpn_ops;
dev->header_ops = &phonet_header_ops;
@@ -284,8 +286,8 @@ static void usbpn_setup(struct net_device *dev)
dev->min_mtu = PHONET_MIN_MTU;
dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
dev->tx_queue_len = 3;
dev->needs_free_netdev = true;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 907f98b1eefe..48d7d278631e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -93,7 +93,8 @@ static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
value, reg, NULL, 0);
}
-static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+static void dm_write_async(struct usbnet *dev, u8 reg, u16 length,
+ const void *data)
{
usbnet_write_cmd_async(dev, DM_WRITE_REGS,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -331,7 +332,7 @@ static int dm9601_set_mac_address(struct net_device *net, void *p)
return -EINVAL;
}
- memcpy(net->dev_addr, addr->sa_data, net->addr_len);
+ eth_hw_addr_set(net, addr->sa_data);
__dm9601_set_mac_address(dev);
return 0;
@@ -391,7 +392,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
* Overwrite the auto-generated address only with good ones.
*/
if (is_valid_ether_addr(mac))
- memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(dev->net, mac);
else {
printk(KERN_WARNING
"dm9601: No valid MAC address in EEPROM, using %pM\n",
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 06e2181e5810..cd33955df0b6 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -303,7 +303,7 @@ static int ipheth_get_macaddr(struct ipheth_device *dev)
__func__, retval);
retval = -EINVAL;
} else {
- memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
+ eth_hw_addr_set(net, dev->ctrl_buf);
retval = 0;
}
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index fc5895f85cee..9f2b70ef39aa 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -149,7 +149,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
if (status)
return status;
- memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
+ eth_hw_addr_set(dev->net, ethernet_addr);
return status;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 793f8fbe0069..03319fdb5235 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1817,7 +1817,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr);
+ eth_hw_addr_set(dev->net, addr);
}
/* MDIO read and write wrappers for phylib */
@@ -2416,7 +2416,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] |
netdev->dev_addr[1] << 8 |
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 66866bef25df..5f42db26d200 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -132,7 +132,8 @@ static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
return 0;
}
-static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
+static int mcs7830_hif_set_mac_address(struct usbnet *dev,
+ const unsigned char *addr)
{
int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
@@ -159,7 +160,7 @@ static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
return ret;
/* it worked --> adopt it on netdev side */
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6a92a3fef75e..c4cd40b090fd 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -357,7 +357,7 @@ static void set_ethernet_addr(pegasus_t *pegasus)
goto err;
}
- memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
+ eth_hw_addr_set(pegasus->net, node_id);
return;
err:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f329e39100a7..4a02f33f0643 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1571,7 +1571,7 @@ static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
mutex_lock(&tp->control);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
@@ -1719,7 +1719,7 @@ static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
return ret;
if (tp->version == RTL_VER_01)
- ether_addr_copy(dev->dev_addr, sa.sa_data);
+ eth_hw_addr_set(dev, sa.sa_data);
else
ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 85a8b96e39a6..4a84f90e377c 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -421,7 +421,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
if (bp[0] & 0x02)
eth_hw_addr_random(net);
else
- ether_addr_copy(net->dev_addr, bp);
+ eth_hw_addr_set(net, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 4a1b0e0fc3a3..3d2bf2acca94 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -262,7 +262,7 @@ static void set_ethernet_addr(rtl8150_t *dev)
ret = get_registers(dev, IDR, sizeof(node_id), node_id);
if (!ret) {
- ether_addr_copy(dev->netdev->dev_addr, node_id);
+ eth_hw_addr_set(dev->netdev, node_id);
} else {
eth_hw_addr_random(dev->netdev);
netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n",
@@ -278,7 +278,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 76f7af161313..3b6987bb4fbe 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -758,8 +758,7 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
/* maybe the boot loader passed the MAC address in devicetree */
- if (!eth_platform_get_mac_address(&dev->udev->dev,
- dev->net->dev_addr)) {
+ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* device tree values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 26b1bd8e845b..21a42a6527dc 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -756,8 +756,7 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
/* maybe the boot loader passed the MAC address in devicetree */
- if (!eth_platform_get_mac_address(&dev->udev->dev,
- dev->net->dev_addr)) {
+ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* device tree values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 6516a37893e2..15209de1849e 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -56,7 +56,8 @@ static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
value, reg, NULL, 0);
}
-static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+static void sr_write_async(struct usbnet *dev, u8 reg, u16 length,
+ const void *data)
{
usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
0, reg, data, length);
@@ -296,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
return -EINVAL;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
return 0;
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 576401c8b1be..838f4e9e8b58 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -503,7 +503,7 @@ static int sr_set_mac_address(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* We use the 20 byte dev->data
* for our 6 byte mac buffer
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4ad25a8b0870..c501b5974aee 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -80,6 +80,7 @@ struct virtnet_sq_stats {
u64 xdp_tx;
u64 xdp_tx_drops;
u64 kicks;
+ u64 tx_timeouts;
};
struct virtnet_rq_stats {
@@ -103,6 +104,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
{ "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
{ "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
{ "kicks", VIRTNET_SQ_STAT(kicks) },
+ { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
@@ -732,6 +734,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
dev->stats.rx_length_errors++;
goto err_len;
}
+
+ if (likely(!vi->xdp_enabled)) {
+ xdp_prog = NULL;
+ goto skip_xdp;
+ }
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -814,6 +822,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
rcu_read_unlock();
+skip_xdp:
skb = build_skb(buf, buflen);
if (!skb) {
put_page(page);
@@ -895,6 +904,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
dev->stats.rx_length_errors++;
goto err_skb;
}
+
+ if (likely(!vi->xdp_enabled)) {
+ xdp_prog = NULL;
+ goto skip_xdp;
+ }
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -1022,6 +1037,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
+skip_xdp:
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
metasize, headroom);
curr_skb = head_skb;
@@ -1860,7 +1876,7 @@ static void virtnet_stats(struct net_device *dev,
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- u64 tpackets, tbytes, rpackets, rbytes, rdrops;
+ u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
struct receive_queue *rq = &vi->rq[i];
struct send_queue *sq = &vi->sq[i];
@@ -1868,6 +1884,7 @@ static void virtnet_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
tpackets = sq->stats.packets;
tbytes = sq->stats.bytes;
+ terrors = sq->stats.tx_timeouts;
} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
do {
@@ -1882,6 +1899,7 @@ static void virtnet_stats(struct net_device *dev,
tot->rx_bytes += rbytes;
tot->tx_bytes += tbytes;
tot->rx_dropped += rdrops;
+ tot->tx_errors += terrors;
}
tot->tx_dropped = dev->stats.tx_dropped;
@@ -2534,8 +2552,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* XDP requires extra queues for XDP_TX */
if (curr_qp + xdp_qp > vi->max_queue_pairs) {
- netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
- curr_qp + xdp_qp, vi->max_queue_pairs);
+ netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
+ curr_qp + xdp_qp, vi->max_queue_pairs);
xdp_qp = 0;
}
@@ -2663,6 +2681,21 @@ static int virtnet_set_features(struct net_device *dev,
return 0;
}
+static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct virtnet_info *priv = netdev_priv(dev);
+ struct send_queue *sq = &priv->sq[txqueue];
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
+
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.tx_timeouts++;
+ u64_stats_update_end(&sq->stats.syncp);
+
+ netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
+ txqueue, sq->name, sq->vq->index, sq->vq->name,
+ jiffies_to_usecs(jiffies - txq->trans_start));
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -2678,6 +2711,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
.ndo_set_features = virtnet_set_features,
+ .ndo_tx_timeout = virtnet_tx_timeout,
};
static void virtnet_config_changed_work(struct work_struct *work)
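
The hook registered above is called by the core TX watchdog once a stopped queue has exceeded dev->watchdog_timeo; virtio-net uses it to bump the new per-queue tx_timeouts counter and log the stall. As a reference, a minimal sketch (hypothetical driver names) of wiring up .ndo_tx_timeout:

static void mydrv_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_err(dev, "TX watchdog fired on queue %u (%u usecs since last start)\n",
		   txqueue, jiffies_to_usecs(jiffies - txq->trans_start));
	/* Typically: record a stat here and kick a reset/recovery worker. */
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_tx_timeout = mydrv_tx_timeout,
};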
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 142f70670f5c..7a205ddf0060 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2824,7 +2824,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ dev_addr_set(netdev, addr->sa_data);
vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0;
@@ -3638,7 +3638,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#endif
vmxnet3_read_mac_addr(adapter, mac);
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ dev_addr_set(netdev, mac);
netdev->netdev_ops = &vmxnet3_netdev_ops;
vmxnet3_set_ethtool_ops(netdev);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 89d31adc3809..282192b82404 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -301,7 +301,7 @@ static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
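
The memcpy()-to-dev_addr_set() conversions in vmxnet3 and lapbether follow the treewide move toward a const netdev->dev_addr: writes must go through a core helper so the kernel can track address changes. A hedged sketch (hypothetical driver name) of a .ndo_set_mac_address handler on an Ethernet device, using eth_hw_addr_set(), the ETH_ALEN convenience wrapper around dev_addr_set():

#include <linux/etherdevice.h>

static int mydrv_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Write through the core helper instead of memcpy(), since
	 * netdev->dev_addr is no longer directly writable. */
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}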
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 49cc4b7ed516..0e9bad33fac8 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1772,9 +1772,8 @@ static const struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
SMCWUSBT-G2 */
- AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1, TEW444UBEU */
AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
- AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */
AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */
AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 2f9be182fbfb..c21e05549f61 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3224,7 +3224,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_debug_print_board_info(ar);
}
- device_get_mac_address(ar->dev, ar->mac_addr, sizeof(ar->mac_addr));
+ device_get_mac_address(ar->dev, ar->mac_addr);
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c272b290fa73..7ca68c81d9b6 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -993,8 +993,12 @@ static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
ath10k_mac_vif_beacon_free(arvif);
if (arvif->beacon_buf) {
- dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
- arvif->beacon_buf, arvif->beacon_paddr);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
}
@@ -5576,10 +5580,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_AP) {
- arvif->beacon_buf = dma_alloc_coherent(ar->dev,
- IEEE80211_MAX_FRAME_LEN,
- &arvif->beacon_paddr,
- GFP_ATOMIC);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN,
+ GFP_KERNEL);
+ arvif->beacon_paddr = (dma_addr_t)arvif->beacon_buf;
+ } else {
+ arvif->beacon_buf =
+ dma_alloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ }
if (!arvif->beacon_buf) {
ret = -ENOMEM;
ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
@@ -5794,8 +5805,12 @@ err_vdev_delete:
err:
if (arvif->beacon_buf) {
- dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
- arvif->beacon_buf, arvif->beacon_paddr);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
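
Both hunks above open-code the same branch. As a design note, a hypothetical helper (not part of this patch) would keep the two free sites in sync; on HL (high-latency, e.g. SDIO) targets the buffer is plain kmalloc() memory and beacon_paddr merely carries the cast kernel virtual address, which is tolerable only because HL targets never DMA into it:

static void ath10k_vif_beacon_buf_free(struct ath10k *ar,
				       struct ath10k_vif *arvif)
{
	if (!arvif->beacon_buf)
		return;

	/* The free must branch exactly as the allocation did. */
	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		kfree(arvif->beacon_buf);	/* HL: no DMA mapping */
	else
		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
				  arvif->beacon_buf, arvif->beacon_paddr);

	arvif->beacon_buf = NULL;
}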
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index b746052737e0..eb705214f3f0 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1363,8 +1363,11 @@ static void ath10k_rx_indication_async_work(struct work_struct *work)
ep->ep_ops.ep_rx_complete(ar, skb);
}
- if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
}
static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
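
The local_bh_disable()/local_bh_enable() bracket matters because this RX indication work item runs in process context: napi_schedule() raises NET_RX_SOFTIRQ, and without BH disabled the raised softirq could sit pending until the next hard interrupt. A minimal sketch of the pattern with a hypothetical driver struct:

struct mydrv {
	struct napi_struct napi;
	struct work_struct rx_work;
};

static void mydrv_rx_work(struct work_struct *work)
{
	struct mydrv *md = container_of(work, struct mydrv, rx_work);

	/* Bracketing napi_schedule() with BH disable/enable makes the
	 * softirq run on local_bh_enable() instead of lingering. */
	local_bh_disable();
	napi_schedule(&md->napi);
	local_bh_enable();
}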
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index b8a4bbfe10b8..7c1c2658cb5f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2610,6 +2610,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (ieee80211_is_beacon(hdr->frame_control))
ath10k_mac_handle_beacon(ar, skb);
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ status->boottime_ns = ktime_get_boottime_ns();
+
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
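
Why boottime rather than monotonic time: cfg80211 ages scan results via boottime_ns, and CLOCK_BOOTTIME keeps advancing across suspend, so a BSS last seen before a long suspend is correctly treated as stale. The receive-side pattern, reduced to a sketch:

/* Stamp only the management frames that feed scan results. */
if (ieee80211_is_beacon(hdr->frame_control) ||
    ieee80211_is_probe_resp(hdr->frame_control))
	rx_status->boottime_ns = ktime_get_boottime_ns();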
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 969bf1a590d9..2328e594a96a 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -37,7 +37,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "IPQ8074/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -59,7 +59,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.tcl_0_only = false,
- .spectral_fft_sz = 2,
+
+ .spectral = {
+ .fft_sz = 2,
+ /* HW bug: the expected BIN size is 2 bytes, but the HW reports
+ * it as 4 bytes, so add a 2-byte pad to compensate.
+ */
+ .fft_pad_sz = 2,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -78,7 +88,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "IPQ6018/hw1.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 2,
.bdf_addr = 0x4ABC0000,
@@ -100,7 +110,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.tcl_0_only = false,
- .spectral_fft_sz = 4,
+
+ .spectral = {
+ .fft_sz = 4,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -119,7 +136,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "QCA6390/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -141,7 +158,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.tcl_0_only = true,
- .spectral_fft_sz = 0,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
@@ -159,7 +183,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "QCN9074/hw1.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 1,
.single_pdev_only = false,
@@ -180,6 +204,15 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
.tcl_0_only = false,
+
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ },
+
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
@@ -197,7 +230,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "WCN6855/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -219,7 +252,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
.tcl_0_only = true,
- .spectral_fft_sz = 0,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
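
The initializers above imply that the old spectral_fft_sz scalar in struct ath11k_hw_params was replaced by an anonymous sub-struct along these lines (field names are taken from the initializers; the exact integer widths are an assumption):

struct ath11k_hw_params {
	/* ... */
	struct {
		u8 fft_sz;
		u8 fft_pad_sz;
		u8 summary_pad_sz;
		u8 fft_hdr_len;
		u16 max_fft_bins;
	} spectral;
	/* ... */
};

Grouping the five knobs per chip keeps the QCN9074 case (16-byte summary padding, 24-byte FFT header, 1024 bins) expressible without adding five parallel top-level fields.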
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 018fb2385f2a..31d234a51c79 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -93,6 +93,8 @@ struct ath11k_skb_rxcb {
bool is_first_msdu;
bool is_last_msdu;
bool is_continuation;
+ bool is_mcbc;
+ bool is_eapol;
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
@@ -100,6 +102,8 @@ struct ath11k_skb_rxcb {
u8 unmapped;
u8 is_frag;
u8 tid;
+ u16 peer_id;
+ u16 seq_no;
};
enum ath11k_hw_rev {
@@ -193,7 +197,9 @@ enum ath11k_dev_flags {
};
enum ath11k_monitor_flags {
- ATH11K_FLAG_MONITOR_ENABLED,
+ ATH11K_FLAG_MONITOR_CONF_ENABLED,
+ ATH11K_FLAG_MONITOR_STARTED,
+ ATH11K_FLAG_MONITOR_VDEV_CREATED,
};
struct ath11k_vif {
@@ -362,6 +368,7 @@ struct ath11k_sta {
enum hal_pn_type pn_type;
struct work_struct update_wk;
+ struct work_struct set_4addr_wk;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
@@ -374,12 +381,15 @@ struct ath11k_sta {
/* protected by conf_mutex */
bool aggr_mode;
#endif
+
+ bool use_4addr_set;
+ u16 tcl_metadata;
};
#define ATH11K_MIN_5G_FREQ 4150
-#define ATH11K_MIN_6G_FREQ 5945
+#define ATH11K_MIN_6G_FREQ 5925
#define ATH11K_MAX_6G_FREQ 7115
-#define ATH11K_NUM_CHANS 100
+#define ATH11K_NUM_CHANS 101
#define ATH11K_MAX_5G_CHAN 173
enum ath11k_state {
@@ -484,7 +494,6 @@ struct ath11k {
u32 chan_tx_pwr;
u32 num_stations;
u32 max_num_stations;
- bool monitor_present;
/* To synchronize concurrent synchronous mac80211 callback operations,
* concurrent debugfs configuration and concurrent FW statistics events.
*/
@@ -559,6 +568,7 @@ struct ath11k {
struct ath11k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
u32 cached_ppdu_id;
+ int monitor_vdev_id;
#ifdef CONFIG_ATH11K_DEBUGFS
struct ath11k_debug debug;
#endif
@@ -591,6 +601,8 @@ struct ath11k_pdev_cap {
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath11k_band_cap band[NUM_NL80211_BANDS];
+ bool nss_ratio_enabled;
+ u8 nss_ratio_info;
};
struct ath11k_pdev {
@@ -794,12 +806,15 @@ struct ath11k_fw_stats_pdev {
s32 hw_reaped;
/* Num underruns */
s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
/* Num MPDUs requeued by SW */
s32 mpdus_requeued;
/* excessive retries */
u32 tx_ko;
+ u32 tx_xretry;
/* data hw rate code */
u32 data_rc;
/* Scheduler self triggers */
@@ -820,6 +835,30 @@ struct ath11k_fw_stats_pdev {
u32 phy_underrun;
/* MPDU is more than txop limit */
u32 txop_ovf;
+ /* Num sequences posted */
+ u32 seq_posted;
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+ /* Num sequences completed */
+ u32 seq_completed;
+ /* Num sequences restarted */
+ u32 seq_restarted;
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+ /* Num MPDUs flushed by SW due to HW PAUSE or SW TXABORT
+ * (reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+ /* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+ s32 mpdus_hw_filter;
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+ /* Num MPDUs that were dropped due to expiry */
+ s32 mpdus_expired;
/* PDEV RX stats */
/* Cnts any change in ring routing mid-ppdu */
@@ -845,6 +884,8 @@ struct ath11k_fw_stats_pdev {
s32 phy_err_drop;
/* Number of mpdu errors - FCS, MIC, ENC etc. */
s32 mpdu_errs;
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
};
struct ath11k_fw_stats_vdev {
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index 5e1f5437b418..fd98ba5b1130 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -8,8 +8,7 @@
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
- struct ath11k_dbring_element *buff,
- gfp_t gfp)
+ struct ath11k_dbring_element *buff)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -35,7 +34,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
goto err;
spin_lock_bh(&ring->idr_lock);
- buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
+ buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&ring->idr_lock);
if (buf_id < 0) {
ret = -ENOBUFS;
@@ -72,8 +71,7 @@ err:
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
- struct ath11k_dbring *ring,
- gfp_t gfp)
+ struct ath11k_dbring *ring)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@@ -92,11 +90,11 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
size = sizeof(*buff) + ring->buf_sz + align - 1;
while (num_remain > 0) {
- buff = kzalloc(size, gfp);
+ buff = kzalloc(size, GFP_ATOMIC);
if (!buff)
break;
- ret = ath11k_dbring_bufs_replenish(ar, ring, buff, gfp);
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@@ -176,7 +174,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
- ret = ath11k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
+ ret = ath11k_dbring_fill_bufs(ar, ring);
return ret;
}
@@ -322,7 +320,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
}
memset(buff, 0, size);
- ath11k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
+ ath11k_dbring_bufs_replenish(ar, ring, buff);
}
spin_unlock_bh(&srng->lock);
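
Dropping the gfp_t parameter is sound because the replenish path is reachable from the buffer-release event handler with srng->lock held, and the idr_alloc() call itself runs under idr_lock; nothing on this path may sleep, so GFP_ATOMIC is the only valid strength. The constraint in miniature:

spin_lock_bh(&ring->idr_lock);
/* Under a spinlock a GFP_KERNEL allocation could sleep (a bug in
 * atomic context), so the ID allocation must use GFP_ATOMIC. */
buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&ring->idr_lock);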
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 554feaf1ed5c..17f0bbbac7ae 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -902,7 +902,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 rx_filter = 0, ring_id, filter, mode;
u8 buf[128] = {0};
- int i, ret;
+ int i, ret, rx_buf_sz = 0;
ssize_t rc;
mutex_lock(&ar->conf_mutex);
@@ -940,6 +940,17 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
}
+ /* Clear rx filter set for monitor mode and rx status */
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ rx_buf_sz, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
#define HTT_RX_FILTER_TLV_LITE_MODE \
(HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
@@ -955,6 +966,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
} else if (mode == ATH11K_PKTLOG_MODE_LITE) {
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_PKTLOG);
@@ -964,7 +976,12 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
} else {
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ rx_filter = tlv_filter.rx_filter;
+
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_DEFAULT);
if (ret) {
@@ -988,7 +1005,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
ar->dp.mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
+ rx_buf_sz, &tlv_filter);
if (ret) {
ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
@@ -996,8 +1013,8 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
}
- ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
- filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+ ath11k_info(ab, "pktlog mode %s\n",
+ ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
ar->debug.pktlog_filter = filter;
ar->debug.pktlog_mode = mode;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index e5346af71f24..ec743a015dc7 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -38,6 +38,10 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS = 29,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF_STATS = 31,
+ ATH11K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH11K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
/* keep this last */
ATH11K_DBG_HTT_NUM_EXT_STATS,
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 9e0c90da99d3..4484235bcda4 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -10,23 +10,28 @@
#include "debug.h"
#include "debugfs_htt_stats.h"
-#define HTT_DBG_OUT(buf, len, fmt, ...) \
- scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
-
-#define HTT_MAX_STRING_LEN 256
#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
#define HTT_TLV_HDR_LEN 4
-#define ARRAY_TO_STRING(out, arr, len) \
+#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \
do { \
- int index = 0; u8 i; \
+ int index = 0; u8 i; const char *str_val = str; \
+ const char *new_line = newline; \
+ if (str_val) { \
+ index += scnprintf((out + buflen), \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen), \
+ "%s = ", str_val); \
+ } \
for (i = 0; i < len; i++) { \
- index += scnprintf(out + index, HTT_MAX_STRING_LEN - index, \
- " %u:%u,", i, arr[i]); \
- if (index < 0 || index >= HTT_MAX_STRING_LEN) \
- break; \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ " %u:%u,", i, arr[i]); \
} \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ "%s", new_line); \
+ buflen += index; \
} while (0)
static inline void htt_print_stats_string_tlv(const void *tag_buf,
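
Unlike the removed ARRAY_TO_STRING(), which staged output through a fixed 256-byte scratch string, PRINT_ARRAY_TO_BUF() appends straight into the caller's stats buffer and bounds itself against ATH11K_HTT_STATS_BUF_SIZE, so long arrays are no longer silently truncated at HTT_MAX_STRING_LEN. The call shape used throughout the converted printers below:

/* Appends "flush_errs = 0:<v0>, 1:<v1>, ...\n\n" at buf + len and
 * advances len by the number of bytes written. */
PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs,
		   "flush_errs", num_elems, "\n\n");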
@@ -38,22 +43,20 @@ static inline void htt_print_stats_string_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
- u16 index = 0;
- char data[HTT_MAX_STRING_LEN] = {0};
tag_len = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "data = ");
for (i = 0; i < tag_len; i++) {
- index += scnprintf(&data[index],
- HTT_MAX_STRING_LEN - index,
- "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
- if (index >= HTT_MAX_STRING_LEN)
- break;
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
}
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "data = %s\n", data);
+ /* Newlines are added for better display */
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -71,107 +74,107 @@ static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
- htt_stats_buf->hw_queued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
- htt_stats_buf->hw_reaped);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun = %u",
- htt_stats_buf->underrun);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_paused = %u",
- htt_stats_buf->hw_paused);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_flush = %u",
- htt_stats_buf->hw_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_filt = %u",
- htt_stats_buf->hw_filt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
- htt_stats_buf->tx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
- htt_stats_buf->mpdu_requeued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
- htt_stats_buf->tx_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
- htt_stats_buf->data_rc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_dropped_xretry = %u",
- htt_stats_buf->mpdu_dropped_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "illegal_rate_phy_err = %u",
- htt_stats_buf->illgl_rate_phy_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cont_xretry = %u",
- htt_stats_buf->cont_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_timeout = %u",
- htt_stats_buf->tx_timeout);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_resets = %u",
- htt_stats_buf->pdev_resets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_underrun = %u",
- htt_stats_buf->phy_underrun);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_ovf = %u",
- htt_stats_buf->txop_ovf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted = %u",
- htt_stats_buf->seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_failed_queueing = %u",
- htt_stats_buf->seq_failed_queueing);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_completed = %u",
- htt_stats_buf->seq_completed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_restarted = %u",
- htt_stats_buf->seq_restarted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_seq_posted = %u",
- htt_stats_buf->mu_seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_switch_hw_paused = %u",
- htt_stats_buf->seq_switch_hw_paused);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "next_seq_posted_dsr = %u",
- htt_stats_buf->next_seq_posted_dsr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted_isr = %u",
- htt_stats_buf->seq_posted_isr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_ctrl_cached = %u",
- htt_stats_buf->seq_ctrl_cached);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count_tqm = %u",
- htt_stats_buf->mpdu_count_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count_tqm = %u",
- htt_stats_buf->msdu_count_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_removed_tqm = %u",
- htt_stats_buf->mpdu_removed_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_removed_tqm = %u",
- htt_stats_buf->msdu_removed_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_sw_flush = %u",
- htt_stats_buf->mpdus_sw_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
- htt_stats_buf->mpdus_hw_filter);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_truncated = %u",
- htt_stats_buf->mpdus_truncated);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_ack_failed = %u",
- htt_stats_buf->mpdus_ack_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_expired = %u",
- htt_stats_buf->mpdus_expired);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u",
- htt_stats_buf->mpdus_seq_hw_retry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u",
- htt_stats_buf->coex_abort_mpdu_cnt_valid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u",
- htt_stats_buf->coex_abort_mpdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u",
- htt_stats_buf->num_total_ppdus_tried_ota);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u",
- htt_stats_buf->num_data_ppdus_tried_ota);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u",
- htt_stats_buf->local_ctrl_mgmt_enqued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u",
- htt_stats_buf->local_ctrl_mgmt_freed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_enqued = %u",
- htt_stats_buf->local_data_enqued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_freed = %u",
- htt_stats_buf->local_data_freed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried = %u",
- htt_stats_buf->mpdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "isr_wait_seq_posted = %u",
- htt_stats_buf->isr_wait_seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_low = %u",
- htt_stats_buf->tx_active_dur_us_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
- htt_stats_buf->tx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ htt_stats_buf->underrun);
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ htt_stats_buf->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ htt_stats_buf->hw_flush);
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ htt_stats_buf->hw_filt);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ htt_stats_buf->mpdu_requeued);
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ htt_stats_buf->tx_xretry);
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ htt_stats_buf->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ htt_stats_buf->cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ htt_stats_buf->tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ htt_stats_buf->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ htt_stats_buf->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ htt_stats_buf->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ htt_stats_buf->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ htt_stats_buf->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ htt_stats_buf->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ htt_stats_buf->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n",
+ htt_stats_buf->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ htt_stats_buf->seq_posted_isr);
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ htt_stats_buf->seq_ctrl_cached);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ htt_stats_buf->mpdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ htt_stats_buf->msdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ htt_stats_buf->msdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ htt_stats_buf->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ htt_stats_buf->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ htt_stats_buf->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ htt_stats_buf->mpdus_expired);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ htt_stats_buf->local_data_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ htt_stats_buf->local_data_freed);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ htt_stats_buf->mpdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n",
+ htt_stats_buf->tx_active_dur_us_high);
if (len >= buf_len)
buf[buf_len - 1] = 0;
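
The FIELD_GET() conversions above replace open-coded mask-and-shift pairs; the macro, from linux/bitfield.h, computes the shift from the mask at compile time, so mask and shift can never drift apart. A self-contained sketch (HTT_STATS_MAC_ID is assumed to be GENMASK(7, 0), matching the old "& 0xFF"):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define HTT_STATS_MAC_ID	GENMASK(7, 0)

static unsigned long mac_id_from_word(u32 word)
{
	/* Equivalent to (word & 0xff) >> 0, with the shift derived
	 * from the mask itself. */
	return FIELD_GET(HTT_STATS_MAC_ID, word);
}

Because GENMASK() expands to an unsigned long constant and FIELD_GET() returns typeof(mask), the converted format strings switch from %u to %lu.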
@@ -190,13 +193,12 @@ htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char urrn_stats[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n");
- ARRAY_TO_STRING(urrn_stats, htt_stats_buf->urrn_stats, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "urrn_stats = %s\n", urrn_stats);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -215,13 +217,12 @@ htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char flush_errs[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n");
- ARRAY_TO_STRING(flush_errs, htt_stats_buf->flush_errs, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_errs = %s\n", flush_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -240,14 +241,12 @@ htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sifs_status[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n");
- ARRAY_TO_STRING(sifs_status, htt_stats_buf->sifs_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_status = %s\n",
- sifs_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -266,13 +265,12 @@ htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char phy_errs[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n");
- ARRAY_TO_STRING(phy_errs, htt_stats_buf->phy_errs, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -291,15 +289,13 @@ htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sifs_hist_status[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n");
- ARRAY_TO_STRING(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_hist_status = %s\n",
- sifs_hist_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status,
+ "sifs_hist_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -318,23 +314,23 @@ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u",
- htt_stats_buf->num_data_ppdus_legacy_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n",
+ htt_stats_buf->num_data_ppdus_legacy_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u",
- htt_stats_buf->num_data_ppdus_ac_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u",
- htt_stats_buf->num_data_ppdus_ax_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u",
- htt_stats_buf->num_data_ppdus_ac_su_txbf);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n",
- htt_stats_buf->num_data_ppdus_ax_su_txbf);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -353,25 +349,15 @@ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
- htt_stats_buf->hist_bin_size);
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(tried_mpdu_cnt_hist,
- htt_stats_buf->tried_mpdu_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s\n",
- tried_mpdu_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER\n");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -390,14 +376,14 @@ static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
HTT_STATS_MAX_HW_INTR_NAME_LEN);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mask = %u",
- htt_stats_buf->mask);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
- htt_stats_buf->count);
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ htt_stats_buf->mask);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -417,13 +403,13 @@ htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
HTT_STATS_MAX_HW_MODULE_NAME_LEN);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_module_name = %s ",
- hw_module_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u",
- htt_stats_buf->count);
+ len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n",
+ hw_module_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -441,29 +427,29 @@ static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
- htt_stats_buf->tx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort_fail_count = %u",
- htt_stats_buf->tx_abort_fail_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort = %u",
- htt_stats_buf->rx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort_fail_count = %u",
- htt_stats_buf->rx_abort_fail_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "warm_reset = %u",
- htt_stats_buf->warm_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cold_reset = %u",
- htt_stats_buf->cold_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flush = %u",
- htt_stats_buf->tx_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_glb_reset = %u",
- htt_stats_buf->tx_glb_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_txq_reset = %u",
- htt_stats_buf->tx_txq_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_timeout_reset = %u\n",
- htt_stats_buf->rx_timeout_reset);
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ htt_stats_buf->tx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ htt_stats_buf->rx_abort);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ htt_stats_buf->rx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ htt_stats_buf->warm_reset);
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ htt_stats_buf->cold_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ htt_stats_buf->tx_flush);
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ htt_stats_buf->tx_glb_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ htt_stats_buf->tx_txq_reset);
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ htt_stats_buf->rx_timeout_reset);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -481,35 +467,36 @@ static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_update_timestamp = %u",
- htt_stats_buf->last_update_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_add_timestamp = %u",
- htt_stats_buf->last_add_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_remove_timestamp = %u",
- htt_stats_buf->last_remove_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_processed_msdu_count = %u",
- htt_stats_buf->total_processed_msdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u",
- htt_stats_buf->cur_msdu_count_in_flowq);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flow_no = %u",
- htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xF0000) >>
- 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_rule = %u",
- (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >>
- 20);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_enqueue_count = %u",
- htt_stats_buf->last_cycle_enqueue_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_dequeue_count = %u",
- htt_stats_buf->last_cycle_dequeue_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_drop_count = %u",
- htt_stats_buf->last_cycle_drop_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_drop_th = %u\n",
- htt_stats_buf->current_drop_th);
+ len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n",
+ htt_stats_buf->last_update_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n",
+ htt_stats_buf->last_add_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n",
+ htt_stats_buf->last_remove_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n",
+ htt_stats_buf->total_processed_msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n",
+ htt_stats_buf->last_cycle_drop_count);
+ len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n",
+ htt_stats_buf->current_drop_th);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -528,38 +515,41 @@ static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
- htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
- (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
- 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
- htt_stats_buf->tid_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
- htt_stats_buf->hw_queued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
- htt_stats_buf->hw_reaped);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
- htt_stats_buf->mpdus_hw_filter);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
- htt_stats_buf->qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
- htt_stats_buf->qdepth_num_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
- htt_stats_buf->qdepth_num_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
- htt_stats_buf->last_scheduled_tsmp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
- htt_stats_buf->pause_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u\n",
- htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n",
+ htt_stats_buf->block_module_id);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -578,42 +568,45 @@ static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
- htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
- (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
- 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
- htt_stats_buf->tid_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_bytes = %u",
- htt_stats_buf->max_qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_n_msdus = %u",
- htt_stats_buf->max_qdepth_n_msdus);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rsvd = %u",
- htt_stats_buf->rsvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
- htt_stats_buf->qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
- htt_stats_buf->qdepth_num_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
- htt_stats_buf->qdepth_num_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
- htt_stats_buf->last_scheduled_tsmp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
- htt_stats_buf->pause_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u",
- htt_stats_buf->block_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "allow_n_flags = 0x%x",
- htt_stats_buf->allow_n_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sendn_frms_allowed = %u\n",
- htt_stats_buf->sendn_frms_allowed);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n",
+ htt_stats_buf->max_qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n",
+ htt_stats_buf->rsvd);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n",
+ htt_stats_buf->allow_n_flags);
+ len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n",
+ htt_stats_buf->sendn_frms_allowed);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -632,21 +625,23 @@ static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_in_reorder = %u",
- htt_stats_buf->dup_in_reorder);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_outside_window = %u",
- htt_stats_buf->dup_past_outside_window);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_within_window = %u",
- htt_stats_buf->dup_past_within_window);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n",
- htt_stats_buf->rxdesc_err_decrypt);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n",
+ htt_stats_buf->dup_in_reorder);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n",
+ htt_stats_buf->dup_past_outside_window);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n",
+ htt_stats_buf->dup_past_within_window);
+ len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n",
+ htt_stats_buf->rxdesc_err_decrypt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -663,16 +658,14 @@ static inline void htt_print_counter_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char counter_name[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_COUNTER_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n");
- ARRAY_TO_STRING(counter_name,
- htt_stats_buf->counter_name,
- HTT_MAX_COUNTER_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "counter_name = %s ", counter_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
- htt_stats_buf->count);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name,
+ "counter_name",
+ HTT_MAX_COUNTER_NAME, "\n");
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
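
PRINT_ARRAY_TO_BUF replaces each ARRAY_TO_STRING()-into-scratch-string-then-HTT_DBG_OUT() pair with one macro that formats the array elements directly into the output buffer; the NULL-name variant seen in later hunks covers callers that print their own prefix first. The real definition belongs to this patch elsewhere in the file; a plausible shape, assuming the same argument order as the call sites:

	/* Plausible sketch, not the verbatim definition: prints an
	 * optional "name = " prefix, then "idx:value," pairs, then the
	 * caller-supplied footer, every write bounded by scnprintf().
	 */
	#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, footer)	\
	do {								\
		int index = 0;						\
		u8 i;							\
									\
		if (str)						\
			index += scnprintf((out) + (buflen),		\
					   ATH11K_HTT_STATS_BUF_SIZE -	\
					   (buflen), "%s = ", str);	\
		for (i = 0; i < (len); i++)				\
			index += scnprintf((out) + (buflen) + index,	\
					   ATH11K_HTT_STATS_BUF_SIZE -	\
					   (buflen) - index,		\
					   " %u:%u,", i, (arr)[i]);	\
		index += scnprintf((out) + (buflen) + index,		\
				   ATH11K_HTT_STATS_BUF_SIZE -		\
				   (buflen) - index, "%s", footer);	\
		(buflen) += index;					\
	} while (0)
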
@@ -690,35 +683,35 @@ static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_cnt = %u",
- htt_stats_buf->ppdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt = %u",
- htt_stats_buf->mpdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_cnt = %u",
- htt_stats_buf->msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_bitmap = %u",
- htt_stats_buf->pause_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_bitmap = %u",
- htt_stats_buf->block_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_rssi = %d",
- htt_stats_buf->rssi);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_count = %llu",
- htt_stats_buf->peer_enqueued_count_low |
- ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dequeued_count = %llu",
- htt_stats_buf->peer_dequeued_count_low |
- ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dropped_count = %llu",
- htt_stats_buf->peer_dropped_count_low |
- ((u64)htt_stats_buf->peer_dropped_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu",
- htt_stats_buf->ppdu_transmitted_bytes_low |
- ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ttl_removed_count = %u",
- htt_stats_buf->peer_ttl_removed_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "inactive_time = %u\n",
- htt_stats_buf->inactive_time);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n",
+ htt_stats_buf->ppdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n",
+ htt_stats_buf->mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n",
+ htt_stats_buf->msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n",
+ htt_stats_buf->pause_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n",
+ htt_stats_buf->block_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n",
+ htt_stats_buf->rssi);
+ len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n",
+ htt_stats_buf->inactive_time);
if (len >= buf_len)
buf[buf_len - 1] = 0;
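
The 64-bit peer counters arrive from firmware as two 32-bit halves, and the reassembly above hinges on widening before shifting: `low | ((u64)high << 32)`. Without the cast, `high << 32` would shift a 32-bit value by its full width, which is undefined behaviour in C and typically yields 0. In isolation:

	#include <linux/types.h>

	static u64 combine_halves(u32 lo, u32 hi)
	{
		/* (u64)hi widens first, so the shift happens in 64-bit
		 * arithmetic; hi << 32 alone would be undefined on u32.
		 */
		return lo | ((u64)hi << 32);
	}
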
@@ -736,29 +729,38 @@ static inline void htt_print_peer_details_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_type = %u",
- htt_stats_buf->peer_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
- htt_stats_buf->vdev_pdev_ast_idx & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- (htt_stats_buf->vdev_pdev_ast_idx & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ast_idx = %u",
- (htt_stats_buf->vdev_pdev_ast_idx & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x",
- htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF00) >> 8,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF0000) >> 16,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF000000) >> 24,
- (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF),
- (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_flags = 0x%x",
- htt_stats_buf->peer_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_flags = 0x%x\n",
- htt_stats_buf->qpeer_flags);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n",
+ htt_stats_buf->peer_type);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_VDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_PDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_AST_IDX,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->mac_addr.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->mac_addr.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n",
+ htt_stats_buf->peer_flags);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n",
+ htt_stats_buf->qpeer_flags);
if (len >= buf_len)
buf[buf_len - 1] = 0;
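
The station MAC address is packed least-significant octet first into a 32-bit low word plus a 16-bit high word, so the six FIELD_GET() calls above pull one octet each, mirroring the removed & / >> ladders byte for byte. Assumed mask definitions consistent with that layout (the real ones are in the ath11k headers):

	#include <linux/bits.h>

	/* Assumed one-octet masks matching the removed shifts */
	#define HTT_MAC_ADDR_L32_0	GENMASK(7, 0)
	#define HTT_MAC_ADDR_L32_1	GENMASK(15, 8)
	#define HTT_MAC_ADDR_L32_2	GENMASK(23, 16)
	#define HTT_MAC_ADDR_L32_3	GENMASK(31, 24)
	#define HTT_MAC_ADDR_H16_0	GENMASK(7, 0)
	#define HTT_MAC_ADDR_H16_1	GENMASK(15, 8)
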
@@ -775,74 +777,40 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
u8 j;
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!tx_gi[j])
- goto fail;
- }
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
- htt_stats_buf->tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
- htt_stats_buf->ack_rssi);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_su_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mu_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mu_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
- HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j],
- htt_stats_buf->tx_gi[j],
- HTT_TX_PEER_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_dcm,
- HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -850,10 +818,6 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(tx_gi[j]);
}
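
Printing straight into the destination also lets the patch delete the per-call kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC) scratch strings and the `fail:` unwind that freed them; an atomic-context allocation that could fail, silently dropping the whole TLV, is replaced by code with no failure path at all. Reduced to its essentials, with hypothetical names:

	/* Old shape: allocate scratch in atomic context, bail on failure.
	 *
	 *	tmp = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
	 *	if (!tmp)
	 *		goto fail;
	 *	ARRAY_TO_STRING(tmp, arr, n);
	 *	len += HTT_DBG_OUT(buf + len, buf_len - len,
	 *			   "x = %s ", tmp);
	 *
	 * New shape: nothing to allocate, nothing to unwind.
	 */
	len += scnprintf(buf + len, buf_len - len, "x = ");
	PRINT_ARRAY_TO_BUF(buf, len, arr, NULL, n, "\n");
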
static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
@@ -864,79 +828,48 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
- char *rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
- char str_buf[HTT_MAX_STRING_LEN] = {0};
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
- rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rssi_chain[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_gi[j])
- goto fail;
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
- htt_stats_buf->nsts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
- htt_stats_buf->rx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
- htt_stats_buf->rssi_mgmt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
- htt_stats_buf->rssi_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
- htt_stats_buf->rssi_comb);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
- HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
- ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
- HTT_RX_PEER_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
- j, rssi_chain[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
- HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -944,13 +877,6 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rssi_chain[j]);
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(rx_gi[j]);
}
static inline void
@@ -962,13 +888,13 @@ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
- htt_stats_buf->mu_mimo_sch_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
- htt_stats_buf->mu_mimo_sch_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
- htt_stats_buf->mu_mimo_ppdu_posted);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -987,22 +913,22 @@ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u",
- htt_stats_buf->mu_mimo_err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u",
- htt_stats_buf->mu_mimo_mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n",
- htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1021,11 +947,13 @@ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__hwq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u\n",
- (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1044,51 +972,53 @@ htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
/* TODO: HKDBG */
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__hwq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u",
- (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "xretry = %u",
- htt_stats_buf->xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_cnt = %u",
- htt_stats_buf->underrun_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cnt = %u",
- htt_stats_buf->flush_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "filt_cnt = %u",
- htt_stats_buf->filt_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_bmap = %u",
- htt_stats_buf->null_mpdu_bmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "user_ack_failure = %u",
- htt_stats_buf->user_ack_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_id_proc = %u",
- htt_stats_buf->sched_id_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_tx_count = %u",
- htt_stats_buf->null_mpdu_tx_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u",
- htt_stats_buf->mpdu_bmap_not_recvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_bar = %u",
- htt_stats_buf->num_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
- htt_stats_buf->rts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
- htt_stats_buf->cts2self);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
- htt_stats_buf->qos_null);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried_cnt = %u",
- htt_stats_buf->mpdu_tried_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queued_cnt = %u",
- htt_stats_buf->mpdu_queued_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u",
- htt_stats_buf->mpdu_ack_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_filt_cnt = %u",
- htt_stats_buf->mpdu_filt_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "false_mpdu_ack_count = %u",
- htt_stats_buf->false_mpdu_ack_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_timeout = %u\n",
- htt_stats_buf->txq_timeout);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
+ htt_stats_buf->xretry);
+ len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
+ htt_stats_buf->underrun_cnt);
+ len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
+ htt_stats_buf->flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
+ htt_stats_buf->filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
+ htt_stats_buf->null_mpdu_bmap);
+ len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
+ htt_stats_buf->user_ack_failure);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
+ htt_stats_buf->sched_id_proc);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
+ htt_stats_buf->num_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n",
+ htt_stats_buf->txq_timeout);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1108,17 +1038,14 @@ htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
- char difs_latency_hist[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hist_intvl = %u",
- htt_stats_buf->hist_intvl);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n",
+ htt_stats_buf->hist_intvl);
- ARRAY_TO_STRING(difs_latency_hist, htt_stats_buf->difs_latency_hist,
- data_len);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "difs_latency_hist = %s\n",
- difs_latency_hist);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist,
+ "difs_latency_hist", data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1138,16 +1065,14 @@ htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len;
- char cmd_result[HTT_MAX_STRING_LEN] = {0};
data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:");
-
- ARRAY_TO_STRING(cmd_result, htt_stats_buf->cmd_result, data_len);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_result = %s\n", cmd_result);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result",
+ data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
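
The variable-length TLVs report their payload size in bytes, so `tag_len >> 2` converts it to a count of u32 elements and min_t() clamps that count to the array the driver actually declared; a malformed or newer-firmware TLV then truncates the print loop instead of reading past the end of the structure. The guard in isolation, with a hypothetical bound standing in for HTT_TX_HWQ_MAX_CMD_RESULT_STATS:

	#include <linux/kernel.h>

	#define MAX_ELEMS	16	/* hypothetical stand-in */

	static u16 tlv_elem_count(u16 tag_len)
	{
		/* bytes -> u32 elements, clamped to the declared array */
		return min_t(u16, tag_len >> 2, MAX_ELEMS);
	}
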
@@ -1167,15 +1092,13 @@ htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
- char cmd_stall_status[HTT_MAX_STRING_LEN] = {0};
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n");
- ARRAY_TO_STRING(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_stall_status = %s\n",
- cmd_stall_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status,
+ "cmd_stall_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1195,15 +1118,14 @@ htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
- char fes_result[HTT_MAX_STRING_LEN] = {0};
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n");
- ARRAY_TO_STRING(fes_result, htt_stats_buf->fes_result, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fes_result = %s\n", fes_result);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1222,27 +1144,16 @@ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = ((tag_len -
sizeof(htt_stats_buf->hist_bin_size)) >> 2);
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
- htt_stats_buf->hist_bin_size);
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(tried_mpdu_cnt_hist,
- htt_stats_buf->tried_mpdu_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "tried_mpdu_cnt_hist = %s\n",
- tried_mpdu_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
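
The removed `required_buffer_size < HTT_MAX_STRING_LEN` check existed to protect ARRAY_TO_STRING()'s fixed scratch string from large histograms, at the cost of printing "INSUFFICIENT PRINT BUFFER" and losing the data entirely. With PRINT_ARRAY_TO_BUF bounding every write against the real destination, oversized input now truncates cleanly instead of being refused outright. A small demonstration of that clamping:

	#include <linux/kernel.h>

	static u32 fill_demo(u8 *buf, u32 buf_len)
	{
		u32 i, len = 0;

		/* deliberately more output than fits: each write is
		 * clamped, so len never exceeds buf_len - 1
		 */
		for (i = 0; i < 1000; i++)
			len += scnprintf(buf + len, buf_len - len,
					 " %u", i);
		return len;
	}
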
@@ -1261,23 +1172,14 @@ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char txop_used_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = tag_len >> 2;
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist,
+ "txop_used_cnt_hist", num_elements, "\n\n");
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(txop_used_cnt_hist,
- htt_stats_buf->txop_used_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_used_cnt_hist = %s\n",
- txop_used_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
@@ -1300,86 +1202,86 @@ static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
const u32 *cbf_160 = htt_stats_buf->cbf_160;
if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
- cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
- i,
- htt_stats_buf->sounding[0],
- htt_stats_buf->sounding[1],
- htt_stats_buf->sounding[2],
- htt_stats_buf->sounding[3]);
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
}
} else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
- cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
- i,
- htt_stats_buf->sounding[0],
- htt_stats_buf->sounding[1],
- htt_stats_buf->sounding[2],
- htt_stats_buf->sounding[3]);
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
}
}
@@ -1400,31 +1302,31 @@ htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "su_bar = %u",
- htt_stats_buf->su_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
- htt_stats_buf->rts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
- htt_stats_buf->cts2self);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
- htt_stats_buf->qos_null);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_1 = %u",
- htt_stats_buf->delayed_bar_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_2 = %u",
- htt_stats_buf->delayed_bar_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_3 = %u",
- htt_stats_buf->delayed_bar_3);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_4 = %u",
- htt_stats_buf->delayed_bar_4);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_5 = %u",
- htt_stats_buf->delayed_bar_5);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_6 = %u",
- htt_stats_buf->delayed_bar_6);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_7 = %u\n",
- htt_stats_buf->delayed_bar_7);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
+ htt_stats_buf->su_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
+ htt_stats_buf->delayed_bar_1);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
+ htt_stats_buf->delayed_bar_2);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
+ htt_stats_buf->delayed_bar_3);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
+ htt_stats_buf->delayed_bar_4);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
+ htt_stats_buf->delayed_bar_5);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
+ htt_stats_buf->delayed_bar_6);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
+ htt_stats_buf->delayed_bar_7);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1443,21 +1345,21 @@ htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa = %u",
- htt_stats_buf->ac_su_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp = %u",
- htt_stats_buf->ac_su_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u",
- htt_stats_buf->ac_mu_mimo_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u",
- htt_stats_buf->ac_mu_mimo_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u",
- htt_stats_buf->ac_mu_mimo_brpoll_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u",
- htt_stats_buf->ac_mu_mimo_brpoll_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n",
- htt_stats_buf->ac_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n",
+ htt_stats_buf->ac_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n",
+ htt_stats_buf->ac_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1476,37 +1378,37 @@ htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa = %u",
- htt_stats_buf->ax_su_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp = %u",
- htt_stats_buf->ax_su_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u",
- htt_stats_buf->ax_mu_mimo_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u",
- htt_stats_buf->ax_mu_mimo_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_3);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_4);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_5);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_6);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_7);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger = %u",
- htt_stats_buf->ax_basic_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger = %u",
- htt_stats_buf->ax_bsr_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger = %u",
- htt_stats_buf->ax_mu_bar_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n",
- htt_stats_buf->ax_mu_rts_trigger);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n",
+ htt_stats_buf->ax_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n",
+ htt_stats_buf->ax_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ htt_stats_buf->ax_basic_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
+ htt_stats_buf->ax_bsr_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1525,21 +1427,21 @@ htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp_err = %u",
- htt_stats_buf->ac_su_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa_err = %u",
- htt_stats_buf->ac_su_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u",
- htt_stats_buf->ac_mu_mimo_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u",
- htt_stats_buf->ac_mu_mimo_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u",
- htt_stats_buf->ac_mu_mimo_brp1_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u",
- htt_stats_buf->ac_mu_mimo_brp2_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n",
- htt_stats_buf->ac_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+ htt_stats_buf->ac_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1558,37 +1460,37 @@ htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp_err = %u",
- htt_stats_buf->ax_su_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa_err = %u",
- htt_stats_buf->ax_su_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u",
- htt_stats_buf->ax_mu_mimo_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u",
- htt_stats_buf->ax_mu_mimo_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u",
- htt_stats_buf->ax_mu_mimo_brp1_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u",
- htt_stats_buf->ax_mu_mimo_brp2_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u",
- htt_stats_buf->ax_mu_mimo_brp3_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u",
- htt_stats_buf->ax_mu_mimo_brp4_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u",
- htt_stats_buf->ax_mu_mimo_brp5_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u",
- htt_stats_buf->ax_mu_mimo_brp6_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u",
- htt_stats_buf->ax_mu_mimo_brp7_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger_err = %u",
- htt_stats_buf->ax_basic_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger_err = %u",
- htt_stats_buf->ax_bsr_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u",
- htt_stats_buf->ax_mu_bar_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n",
- htt_stats_buf->ax_mu_rts_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+ htt_stats_buf->ax_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1608,35 +1510,35 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
- htt_stats_buf->mu_mimo_sch_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
- htt_stats_buf->mu_mimo_sch_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
- htt_stats_buf->mu_mimo_ppdu_posted);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_sch_nusers_%u = %u",
- i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_sch_nusers_%u = %u",
- i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_ofdma_sch_nusers_%u = %u",
- i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1657,114 +1559,114 @@ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
@@ -1785,15 +1687,12 @@ htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_cmd_posted[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n");
- ARRAY_TO_STRING(sched_cmd_posted, htt_stats_buf->sched_cmd_posted,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_posted = %s\n",
- sched_cmd_posted);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted,
+ "sched_cmd_posted", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
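
The PRINT_ARRAY_TO_BUF() macro, introduced earlier in this patch, replaces the old two-step ARRAY_TO_STRING() plus HTT_DBG_OUT() sequence: instead of rendering the array into an on-stack HTT_MAX_STRING_LEN scratch buffer and then copying it, it formats the elements straight into the stats buffer, which also removes the stack usage and the per-call overflow sizing checks. Its exact definition is not shown in this hunk; a hedged sketch of the assumed behaviour, written as a helper function for clarity:

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative stand-in for PRINT_ARRAY_TO_BUF(buf, len, arr, name, n,
 * footer): print "name = v0 v1 ... vN" plus a caller-chosen footer
 * ("\n" or "\n\n") directly into the stats buffer.  The real macro
 * updates 'len' in place; this sketch returns the new length instead.
 */
static u32 print_array_to_buf(char *buf, u32 len, u32 buf_len,
			      const u32 *arr, const char *name,
			      u16 n, const char *footer)
{
	u16 i;

	len += scnprintf(buf + len, buf_len - len, "%s = ", name);
	for (i = 0; i < n; i++)
		len += scnprintf(buf + len, buf_len - len, "%u ", arr[i]);

	return len + scnprintf(buf + len, buf_len - len, "%s", footer);
}
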
@@ -1812,15 +1711,12 @@ htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_cmd_reaped[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n");
- ARRAY_TO_STRING(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_reaped = %s\n",
- sched_cmd_reaped);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped,
+ "sched_cmd_reaped", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1839,18 +1735,15 @@ htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_order_su[HTT_MAX_STRING_LEN] = {0};
/* each entry is u32, i.e. 4 bytes */
u32 sched_order_su_num_entries =
min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n");
- ARRAY_TO_STRING(sched_order_su, htt_stats_buf->sched_order_su,
- sched_order_su_num_entries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_order_su = %s\n",
- sched_order_su);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su",
+ sched_order_su_num_entries, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1869,17 +1762,15 @@ htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_ineligibility[HTT_MAX_STRING_LEN] = {0};
/* each entry is u32, i.e. 4 bytes */
u32 sched_ineligibility_num_entries = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n");
- ARRAY_TO_STRING(sched_ineligibility, htt_stats_buf->sched_ineligibility,
- sched_ineligibility_num_entries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_ineligibility = %s\n",
- sched_ineligibility);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility,
+ "sched_ineligibility", sched_ineligibility_num_entries,
+ "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1898,54 +1789,56 @@ htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__txq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_id = %u",
- (htt_stats_buf->mac_id__txq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_policy = %u",
- htt_stats_buf->sched_policy);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "last_sched_cmd_posted_timestamp = %u",
- htt_stats_buf->last_sched_cmd_posted_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "last_sched_cmd_compl_timestamp = %u",
- htt_stats_buf->last_sched_cmd_compl_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u",
- htt_stats_buf->sched_2_tac_lwm_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_ring_full = %u",
- htt_stats_buf->sched_2_tac_ring_full);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_post_failure = %u",
- htt_stats_buf->sched_cmd_post_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_active_tids = %u",
- htt_stats_buf->num_active_tids);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ps_schedules = %u",
- htt_stats_buf->num_ps_schedules);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmds_pending = %u",
- htt_stats_buf->sched_cmds_pending);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_register = %u",
- htt_stats_buf->num_tid_register);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_unregister = %u",
- htt_stats_buf->num_tid_unregister);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_qstats_queried = %u",
- htt_stats_buf->num_qstats_queried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qstats_update_pending = %u",
- htt_stats_buf->qstats_update_pending);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_qstats_query_timestamp = %u",
- htt_stats_buf->last_qstats_query_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_cmdq_full = %u",
- htt_stats_buf->num_tqm_cmdq_full);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u",
- htt_stats_buf->num_de_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u",
- htt_stats_buf->num_rt_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u",
- htt_stats_buf->num_tqm_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_sched = %u\n",
- htt_stats_buf->notify_sched);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
- htt_stats_buf->dur_based_sendn_term);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ htt_stats_buf->sched_policy);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ htt_stats_buf->num_active_tids);
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ htt_stats_buf->num_ps_schedules);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ htt_stats_buf->sched_cmds_pending);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ htt_stats_buf->num_tid_register);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ htt_stats_buf->num_tid_unregister);
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ htt_stats_buf->num_qstats_queried);
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ htt_stats_buf->qstats_update_pending);
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n\n",
+ htt_stats_buf->notify_sched);
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n",
+ htt_stats_buf->dur_based_sendn_term);
if (len >= buf_len)
buf[buf_len - 1] = 0;
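
The open-coded mask-and-shift extractions (word & 0xFF, (word & 0xFF00) >> 8, and so on) are replaced with FIELD_GET() from <linux/bitfield.h>, paired with GENMASK()-based field definitions such as HTT_STATS_MAC_ID. Because GENMASK() yields an unsigned long, FIELD_GET() does too, which is why the matching format specifiers change from %u to %lu throughout. A short sketch with assumed mask layouts (mac_id in bits 7:0, txq_id in bits 15:8, matching the shifts being removed):

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical masks for illustration; the driver's real
 * HTT_*_MAC_ID / HTT_*_TXQ_ID definitions live in its headers.
 */
#define EX_MAC_ID	GENMASK(7, 0)
#define EX_TXQ_ID	GENMASK(15, 8)

static int show_ids(char *buf, size_t size, u32 word)
{
	/* FIELD_GET() masks and right-shifts in one step; the result
	 * takes the type of the mask (unsigned long), hence %lu.
	 */
	return scnprintf(buf, size, "mac_id = %lu\ntxq_id = %lu\n",
			 FIELD_GET(EX_MAC_ID, word),
			 FIELD_GET(EX_TXQ_ID, word));
}
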
@@ -1963,11 +1856,11 @@ static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_timestamp = %u\n",
- htt_stats_buf->current_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ htt_stats_buf->current_timestamp);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1986,16 +1879,13 @@ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char gen_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n");
- ARRAY_TO_STRING(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_end_reason = %s\n",
- gen_mpdu_end_reason);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason,
+ "gen_mpdu_end_reason", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2014,16 +1904,14 @@ htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char list_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason,
+ "list_mpdu_end_reason", num_elems, "\n\n");
- ARRAY_TO_STRING(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_end_reason = %s\n",
- list_mpdu_end_reason);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
@@ -2041,16 +1929,13 @@ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char list_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
- ARRAY_TO_STRING(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s\n",
- list_mpdu_cnt_hist);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist,
+ "list_mpdu_cnt_hist", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2069,69 +1954,69 @@ htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count = %u",
- htt_stats_buf->msdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count = %u",
- htt_stats_buf->mpdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu = %u",
- htt_stats_buf->remove_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu = %u",
- htt_stats_buf->remove_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl = %u",
- htt_stats_buf->remove_msdu_ttl);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_bar = %u",
- htt_stats_buf->send_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "bar_sync = %u",
- htt_stats_buf->bar_sync);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu = %u",
- htt_stats_buf->notify_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
- htt_stats_buf->sync_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
- htt_stats_buf->write_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_trigger = %u",
- htt_stats_buf->hwsch_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
- htt_stats_buf->gen_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_list_cmd = %u",
- htt_stats_buf->gen_list_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
- htt_stats_buf->remove_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u",
- htt_stats_buf->remove_mpdu_tried_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
- htt_stats_buf->mpdu_queue_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
- htt_stats_buf->mpdu_head_info_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
- htt_stats_buf->msdu_flow_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
- htt_stats_buf->remove_msdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u",
- htt_stats_buf->remove_msdu_ttl_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
- htt_stats_buf->flush_cache_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
- htt_stats_buf->update_mpduq_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue = %u",
- htt_stats_buf->enqueue);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue_notify = %u",
- htt_stats_buf->enqueue_notify);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_at_head = %u",
- htt_stats_buf->notify_mpdu_at_head);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_state_valid = %u",
- htt_stats_buf->notify_mpdu_state_valid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify1 = %u",
- htt_stats_buf->sched_udp_notify1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify2 = %u",
- htt_stats_buf->sched_udp_notify2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify1 = %u",
- htt_stats_buf->sched_nonudp_notify1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n",
- htt_stats_buf->sched_nonudp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ htt_stats_buf->msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ htt_stats_buf->mpdu_count);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ htt_stats_buf->remove_msdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ htt_stats_buf->remove_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ htt_stats_buf->remove_msdu_ttl);
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ htt_stats_buf->send_bar);
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ htt_stats_buf->bar_sync);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ htt_stats_buf->notify_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ htt_stats_buf->hwsch_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ htt_stats_buf->gen_list_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ htt_stats_buf->enqueue);
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ htt_stats_buf->enqueue_notify);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ htt_stats_buf->sched_udp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ htt_stats_buf->sched_udp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ htt_stats_buf->sched_nonudp_notify2);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2149,23 +2034,23 @@ static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_cmdq_id = %u",
- htt_stats_buf->max_cmdq_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u",
- htt_stats_buf->list_mpdu_cnt_hist_intvl);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu = %u",
- htt_stats_buf->add_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty = %u",
- htt_stats_buf->q_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty = %u",
- htt_stats_buf->q_not_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_notification = %u",
- htt_stats_buf->drop_notification);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "desc_threshold = %u\n",
- htt_stats_buf->desc_threshold);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ htt_stats_buf->max_cmdq_id);
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ htt_stats_buf->add_msdu);
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ htt_stats_buf->q_empty);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ htt_stats_buf->q_not_empty);
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ htt_stats_buf->drop_notification);
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n",
+ htt_stats_buf->desc_threshold);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2183,13 +2068,13 @@ static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty_failure = %u",
- htt_stats_buf->q_empty_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty_failure = %u",
- htt_stats_buf->q_not_empty_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu_failure = %u\n",
- htt_stats_buf->add_msdu_failure);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ htt_stats_buf->q_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ htt_stats_buf->q_not_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ htt_stats_buf->add_msdu_failure);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2207,33 +2092,35 @@ static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__cmdq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmdq_id = %u\n",
- (htt_stats_buf->mac_id__cmdq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
- htt_stats_buf->sync_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
- htt_stats_buf->write_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
- htt_stats_buf->gen_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
- htt_stats_buf->mpdu_queue_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
- htt_stats_buf->mpdu_head_info_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
- htt_stats_buf->msdu_flow_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
- htt_stats_buf->remove_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
- htt_stats_buf->remove_msdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
- htt_stats_buf->flush_cache_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
- htt_stats_buf->update_mpduq_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_msduq_cmd = %u\n",
- htt_stats_buf->update_msduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n",
+ htt_stats_buf->update_msduq_cmd);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2252,20 +2139,20 @@ htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m1_packets = %u",
- htt_stats_buf->m1_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m2_packets = %u",
- htt_stats_buf->m2_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m3_packets = %u",
- htt_stats_buf->m3_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m4_packets = %u",
- htt_stats_buf->m4_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "g1_packets = %u",
- htt_stats_buf->g1_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "g2_packets = %u\n",
- htt_stats_buf->g2_packets);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ htt_stats_buf->m1_packets);
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ htt_stats_buf->m2_packets);
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ htt_stats_buf->m3_packets);
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ htt_stats_buf->m4_packets);
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ htt_stats_buf->g1_packets);
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n",
+ htt_stats_buf->g2_packets);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2284,34 +2171,34 @@ htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bss_peer_not_found = %u",
- htt_stats_buf->ap_bss_peer_not_found);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u",
- htt_stats_buf->ap_bcast_mcast_no_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sta_delete_in_progress = %u",
- htt_stats_buf->sta_delete_in_progress);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ibss_no_bss_peer = %u",
- htt_stats_buf->ibss_no_bss_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_vdev_type = %u",
- htt_stats_buf->invalid_vdev_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_ast_peer_entry = %u",
- htt_stats_buf->invalid_ast_peer_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_entry_invalid = %u",
- htt_stats_buf->peer_entry_invalid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ethertype_not_ip = %u",
- htt_stats_buf->ethertype_not_ip);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "eapol_lookup_failed = %u",
- htt_stats_buf->eapol_lookup_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_not_allow_data = %u",
- htt_stats_buf->qpeer_not_allow_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_tid_override = %u",
- htt_stats_buf->fse_tid_override);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u",
- htt_stats_buf->ipv6_jumbogram_zero_length);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
- htt_stats_buf->qos_to_non_qos_in_prog);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ htt_stats_buf->sta_delete_in_progress);
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ htt_stats_buf->invalid_vdev_type);
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ htt_stats_buf->peer_entry_invalid);
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ htt_stats_buf->ethertype_not_ip);
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ htt_stats_buf->eapol_lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ htt_stats_buf->fse_tid_override);
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2330,73 +2217,73 @@ htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "arp_packets = %u",
- htt_stats_buf->arp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "igmp_packets = %u",
- htt_stats_buf->igmp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dhcp_packets = %u",
- htt_stats_buf->dhcp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_inspected = %u",
- htt_stats_buf->host_inspected);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_included = %u",
- htt_stats_buf->htt_included);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_mcs = %u",
- htt_stats_buf->htt_valid_mcs);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_nss = %u",
- htt_stats_buf->htt_valid_nss);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_preamble_type = %u",
- htt_stats_buf->htt_valid_preamble_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_chainmask = %u",
- htt_stats_buf->htt_valid_chainmask);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_guard_interval = %u",
- htt_stats_buf->htt_valid_guard_interval);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_retries = %u",
- htt_stats_buf->htt_valid_retries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_bw_info = %u",
- htt_stats_buf->htt_valid_bw_info);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_power = %u",
- htt_stats_buf->htt_valid_power);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x",
- htt_stats_buf->htt_valid_key_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_no_encryption = %u",
- htt_stats_buf->htt_valid_no_encryption);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_entry_count = %u",
- htt_stats_buf->fse_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_be = %u",
- htt_stats_buf->fse_priority_be);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_high = %u",
- htt_stats_buf->fse_priority_high);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_low = %u",
- htt_stats_buf->fse_priority_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u",
- htt_stats_buf->fse_traffic_ptrn_be);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u",
- htt_stats_buf->fse_traffic_ptrn_over_sub);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u",
- htt_stats_buf->fse_traffic_ptrn_bursty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u",
- htt_stats_buf->fse_traffic_ptrn_interactive);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u",
- htt_stats_buf->fse_traffic_ptrn_periodic);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_alloc = %u",
- htt_stats_buf->fse_hwqueue_alloc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_created = %u",
- htt_stats_buf->fse_hwqueue_created);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u",
- htt_stats_buf->fse_hwqueue_send_to_host);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mcast_entry = %u",
- htt_stats_buf->mcast_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "bcast_entry = %u",
- htt_stats_buf->bcast_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_update_peer_cache = %u",
- htt_stats_buf->htt_update_peer_cache);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_learning_frame = %u",
- htt_stats_buf->htt_learning_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_invalid_peer = %u",
- htt_stats_buf->fse_invalid_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mec_notify = %u\n",
- htt_stats_buf->mec_notify);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ htt_stats_buf->arp_packets);
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ htt_stats_buf->igmp_packets);
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ htt_stats_buf->dhcp_packets);
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ htt_stats_buf->host_inspected);
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ htt_stats_buf->htt_included);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ htt_stats_buf->htt_valid_mcs);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ htt_stats_buf->htt_valid_nss);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ htt_stats_buf->htt_valid_chainmask);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ htt_stats_buf->htt_valid_retries);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ htt_stats_buf->htt_valid_bw_info);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ htt_stats_buf->htt_valid_power);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ htt_stats_buf->htt_valid_key_flags);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ htt_stats_buf->fse_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ htt_stats_buf->fse_priority_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ htt_stats_buf->fse_priority_high);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ htt_stats_buf->fse_priority_low);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ htt_stats_buf->fse_hwqueue_created);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ htt_stats_buf->mcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ htt_stats_buf->bcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ htt_stats_buf->htt_update_peer_cache);
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ htt_stats_buf->htt_learning_frame);
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ htt_stats_buf->fse_invalid_peer);
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ htt_stats_buf->mec_notify);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2415,24 +2302,24 @@ htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "eok = %u",
- htt_stats_buf->eok);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "classify_done = %u",
- htt_stats_buf->classify_done);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "lookup_failed = %u",
- htt_stats_buf->lookup_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_dhcp = %u",
- htt_stats_buf->send_host_dhcp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_mcast = %u",
- htt_stats_buf->send_host_mcast);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_unknown_dest = %u",
- htt_stats_buf->send_host_unknown_dest);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host = %u",
- htt_stats_buf->send_host);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "status_invalid = %u\n",
- htt_stats_buf->status_invalid);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ htt_stats_buf->eok);
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ htt_stats_buf->classify_done);
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ htt_stats_buf->lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ htt_stats_buf->send_host_dhcp);
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ htt_stats_buf->send_host_mcast);
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ htt_stats_buf->send_host_unknown_dest);
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ htt_stats_buf->send_host);
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ htt_stats_buf->status_invalid);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2451,14 +2338,14 @@ htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_pkts = %u",
- htt_stats_buf->enqueued_pkts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm = %u",
- htt_stats_buf->to_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm_bypass = %u\n",
- htt_stats_buf->to_tqm_bypass);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ htt_stats_buf->enqueued_pkts);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ htt_stats_buf->to_tqm);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ htt_stats_buf->to_tqm_bypass);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2477,14 +2364,14 @@ htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "discarded_pkts = %u",
- htt_stats_buf->discarded_pkts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_frames = %u",
- htt_stats_buf->local_frames);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "is_ext_msdu = %u\n",
- htt_stats_buf->is_ext_msdu);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ htt_stats_buf->discarded_pkts);
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ htt_stats_buf->local_frames);
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ htt_stats_buf->is_ext_msdu);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2502,17 +2389,17 @@ static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_dummy_frame = %u",
- htt_stats_buf->tcl_dummy_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_dummy_frame = %u",
- htt_stats_buf->tqm_dummy_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_notify_frame = %u",
- htt_stats_buf->tqm_notify_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw2wbm_enq = %u",
- htt_stats_buf->fw2wbm_enq);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_bypass_frame = %u\n",
- htt_stats_buf->tqm_bypass_frame);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ htt_stats_buf->tcl_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ htt_stats_buf->tqm_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ htt_stats_buf->tqm_notify_frame);
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ htt_stats_buf->fw2wbm_enq);
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ htt_stats_buf->tqm_bypass_frame);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2531,24 +2418,13 @@ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw2wbm_ring_full_hist[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = tag_len >> 2;
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(fw2wbm_ring_full_hist,
- htt_stats_buf->fw2wbm_ring_full_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "fw2wbm_ring_full_hist = %s\n",
- fw2wbm_ring_full_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist,
+ "fw2wbm_ring_full_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2566,21 +2442,21 @@ htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *s
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl2fw_entry_count = %u",
- htt_stats_buf->tcl2fw_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "not_to_fw = %u",
- htt_stats_buf->not_to_fw);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u",
- htt_stats_buf->invalid_pdev_vdev_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u",
- htt_stats_buf->tcl_res_invalid_addrx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm2fw_entry_count = %u",
- htt_stats_buf->wbm2fw_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev = %u\n",
- htt_stats_buf->invalid_pdev);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ htt_stats_buf->not_to_fw);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n",
+ htt_stats_buf->invalid_pdev);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2597,52 +2473,51 @@ static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char low_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
- char high_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr = %u",
- htt_stats_buf->base_addr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
- htt_stats_buf->elem_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_elems = %u",
- htt_stats_buf->num_elems__prefetch_tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_tail_idx = %u",
- (htt_stats_buf->num_elems__prefetch_tail_idx &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "head_idx = %u",
- htt_stats_buf->head_idx__tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_idx = %u",
- (htt_stats_buf->head_idx__tail_idx & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_head_idx = %u",
- htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_tail_idx = %u",
- (htt_stats_buf->shadow_head_idx__shadow_tail_idx &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tail_incr = %u",
- htt_stats_buf->num_tail_incr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "lwm_thresh = %u",
- htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwm_thresh = %u",
- (htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "overrun_hit_count = %u",
- htt_stats_buf->overrun_hit_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_hit_count = %u",
- htt_stats_buf->underrun_hit_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prod_blockwait_count = %u",
- htt_stats_buf->prod_blockwait_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cons_blockwait_count = %u",
- htt_stats_buf->cons_blockwait_count);
-
- ARRAY_TO_STRING(low_wm_hit_count, htt_stats_buf->low_wm_hit_count,
- HTT_STATS_LOW_WM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "low_wm_hit_count = %s ",
- low_wm_hit_count);
-
- ARRAY_TO_STRING(high_wm_hit_count, htt_stats_buf->high_wm_hit_count,
- HTT_STATS_HIGH_WM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "high_wm_hit_count = %s\n",
- high_wm_hit_count);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n",
+ htt_stats_buf->base_addr);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n",
+ htt_stats_buf->num_tail_incr);
+ len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n",
+ htt_stats_buf->overrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n",
+ htt_stats_buf->underrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n",
+ htt_stats_buf->prod_blockwait_count);
+ len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n",
+ htt_stats_buf->cons_blockwait_count);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
+ "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count,
+ "high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2660,11 +2535,11 @@ static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2682,16 +2557,12 @@ static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char dwords_used_by_user_n[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n");
- ARRAY_TO_STRING(dwords_used_by_user_n,
- htt_stats_buf->dwords_used_by_user_n,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dwords_used_by_user_n = %s\n",
- dwords_used_by_user_n);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n,
+ "dwords_used_by_user_n", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
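
ARRAY_TO_STRING() formatted each counter array into a separate HTT_MAX_STRING_LEN scratch string and then printed that string into the stats buffer; PRINT_ARRAY_TO_BUF() emits the " index:value," pairs directly into buf, which is what lets the scratch arrays (and, further down, their GFP_ATOMIC allocations) be deleted. A sketch of such a helper, assumed rather than quoted from the macro actually added to debugfs_htt_stats.c:

    /* Assumed shape: optional "name = " prefix when str is non-NULL
     * (the GI loops below pass NULL and print their own prefix), then
     * one " i:arr[i]," pair per element, then a caller-chosen
     * terminator; buflen is advanced in place. */
    #define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, arr_len, newline) \
    do {								\
            int idx = 0;						\
            u16 i;							\
            if (str)						\
                    idx += scnprintf((out) + (buflen),		\
                                     ATH11K_HTT_STATS_BUF_SIZE - (buflen), \
                                     "%s = ", str);			\
            for (i = 0; i < (arr_len); i++)				\
                    idx += scnprintf((out) + (buflen) + idx,	\
                                     ATH11K_HTT_STATS_BUF_SIZE - (buflen) - idx, \
                                     " %u:%u,", i, (arr)[i]);	\
            idx += scnprintf((out) + (buflen) + idx,		\
                             ATH11K_HTT_STATS_BUF_SIZE - (buflen) - idx, \
                             "%s", newline);			\
            (buflen) += idx;					\
    } while (0)
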
@@ -2709,21 +2580,21 @@ static inline void htt_print_sfm_client_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "client_id = %u",
- htt_stats_buf->client_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_min = %u",
- htt_stats_buf->buf_min);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_max = %u",
- htt_stats_buf->buf_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_busy = %u",
- htt_stats_buf->buf_busy);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_alloc = %u",
- htt_stats_buf->buf_alloc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_avail = %u",
- htt_stats_buf->buf_avail);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_users = %u\n",
- htt_stats_buf->num_users);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+ htt_stats_buf->client_id);
+ len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+ htt_stats_buf->buf_min);
+ len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+ htt_stats_buf->buf_max);
+ len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+ htt_stats_buf->buf_busy);
+ len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+ htt_stats_buf->buf_alloc);
+ len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+ htt_stats_buf->buf_avail);
+ len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
+ htt_stats_buf->num_users);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2741,17 +2612,17 @@ static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_total = %u",
- htt_stats_buf->buf_total);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mem_empty = %u",
- htt_stats_buf->mem_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "deallocate_bufs = %u",
- htt_stats_buf->deallocate_bufs);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+ htt_stats_buf->buf_total);
+ len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+ htt_stats_buf->mem_empty);
+ len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+ htt_stats_buf->deallocate_bufs);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
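
Accumulating len across back-to-back calls without intermediate bounds checks is safe because of scnprintf()'s return convention: unlike snprintf(), which returns the length that would have been written given unlimited space, scnprintf() returns the number of bytes actually stored (excluding the trailing NUL) and never more than size - 1. A small illustration:

    #include <linux/kernel.h>

    static void demo_scnprintf(void)
    {
            char buf[8];
            u32 len = 0;

            /* Only 7 characters fit; scnprintf() reports what it
             * actually stored, so len == 7 here, not 10. */
            len += scnprintf(buf + len, sizeof(buf) - len, "0123456789");

            /* Follow-up calls get a size of 1, store nothing but the
             * NUL, and return 0 - len can never run past the buffer. */
            len += scnprintf(buf + len, sizeof(buf) - len, "more");
    }

The trailing "if (len >= buf_len)" clamp that each printer keeps is then, in practice, only defensive NUL-termination for the exactly-full edge case.
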
@@ -2769,42 +2640,51 @@ static inline void htt_print_sring_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_id = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "arena = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ep = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_lsb = 0x%x",
- htt_stats_buf->base_addr_lsb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_msb = 0x%x",
- htt_stats_buf->base_addr_msb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_size = %u",
- htt_stats_buf->ring_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
- htt_stats_buf->elem_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_avail_words = %u",
- htt_stats_buf->num_avail_words__num_valid_words & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_valid_words = %u",
- (htt_stats_buf->num_avail_words__num_valid_words &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "head_ptr = %u",
- htt_stats_buf->head_ptr__tail_ptr & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_ptr = %u",
- (htt_stats_buf->head_ptr__tail_ptr & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "consumer_empty = %u",
- htt_stats_buf->consumer_empty__producer_full & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "producer_full = %u",
- (htt_stats_buf->consumer_empty__producer_full &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_count = %u",
- htt_stats_buf->prefetch_count__internal_tail_ptr & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "internal_tail_ptr = %u\n",
- (htt_stats_buf->prefetch_count__internal_tail_ptr &
- 0xFFFF0000) >> 16);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_MAC_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_RING_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "arena = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_ARENA,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ep = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_EP,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+ htt_stats_buf->base_addr_lsb);
+ len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+ htt_stats_buf->base_addr_msb);
+ len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+ htt_stats_buf->ring_size);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_HEAD_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_TAIL_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n",
+ FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2822,9 +2702,9 @@ static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2842,165 +2722,115 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
-
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!tx_gi[j])
- goto fail;
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
- htt_stats_buf->tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u",
- htt_stats_buf->ac_mu_mimo_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u",
- htt_stats_buf->ax_mu_mimo_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_ldpc = %u",
- htt_stats_buf->ofdma_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_success = %u",
- htt_stats_buf->rts_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
- htt_stats_buf->ack_rssi);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u",
- htt_stats_buf->tx_legacy_cck_rate[0],
- htt_stats_buf->tx_legacy_cck_rate[1],
- htt_stats_buf->tx_legacy_cck_rate[2],
- htt_stats_buf->tx_legacy_cck_rate[3]);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
- " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u",
- htt_stats_buf->tx_legacy_ofdm_rate[0],
- htt_stats_buf->tx_legacy_ofdm_rate[1],
- htt_stats_buf->tx_legacy_ofdm_rate[2],
- htt_stats_buf->tx_legacy_ofdm_rate[3],
- htt_stats_buf->tx_legacy_ofdm_rate[4],
- htt_stats_buf->tx_legacy_ofdm_rate[5],
- htt_stats_buf->tx_legacy_ofdm_rate[6],
- htt_stats_buf->tx_legacy_ofdm_rate[7]);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
- HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u",
- htt_stats_buf->tx_he_ltf[1],
- htt_stats_buf->tx_he_ltf[2],
- htt_stats_buf->tx_he_ltf[3]);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+ htt_stats_buf->rts_success);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss,
+ "ac_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss,
+ "ax_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw,
+ "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw,
+ "ax_mu_mimo_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
/* SU GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AC MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AX MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* DL OFDMA GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofdma_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_dcm,
- HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3008,9 +2838,6 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-fail:
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(tx_gi[j]);
}
static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
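
Beyond the mechanical substitution, the rewrite of htt_print_tx_pdev_rate_stats_tlv() above deletes the per-invocation kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC) scratch buffers and the fail: unwind they required - atomic allocations that could fail and abort the whole TLV printout. Side by side, one GI loop body goes from heap-and-copy to a direct two-step print (both halves taken from the hunks above):

    /* Before: allocate scratch, format, then copy into buf. */
    tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
    if (!tx_gi[j])
            goto fail;              /* aborts the whole TLV printout */
    ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j],
                    HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
    len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
                       j, tx_gi[j]);

    /* After: print straight into the stats buffer; no allocation,
     * no error path, no kfree() cleanup loop. */
    len += scnprintf(buf + len, buf_len - len, "tx_gi[%u] = ", j);
    PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
                       HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
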
@@ -3021,226 +2848,168 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i, j;
- u16 index = 0;
- char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rssi_chain[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_gi[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- rx_pilot_evm_db[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_pilot_evm_db[j])
- goto fail;
- }
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
- htt_stats_buf->nsts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
- htt_stats_buf->rx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
- htt_stats_buf->rssi_mgmt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
- htt_stats_buf->rssi_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
- htt_stats_buf->rssi_comb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_in_dbm = %d",
- htt_stats_buf->rssi_in_dbm);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
- HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_nss_count = %u",
- htt_stats_buf->nss_count);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_pilot_count = %u",
- htt_stats_buf->pilot_count);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+ len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+ htt_stats_buf->rssi_in_dbm);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+ htt_stats_buf->nss_count);
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+ htt_stats_buf->pilot_count);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
-
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
- index += scnprintf(&rx_pilot_evm_db[j][index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i,
- htt_stats_buf->rx_pilot_evm_db[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB[%u] = %s ",
- j, rx_pilot_evm_db[j]);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db_mean = ");
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,", i,
+ htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
- j, rssi_chain[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
- HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s", str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_su_ext = %u",
- htt_stats_buf->rx_11ax_su_ext);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ac_mumimo = %u",
- htt_stats_buf->rx_11ac_mumimo);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_mumimo = %u",
- htt_stats_buf->rx_11ax_mumimo);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ofdma = %u",
- htt_stats_buf->rx_11ax_ofdma);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txbf = %u",
- htt_stats_buf->txbf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_cck_rate,
- HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_ofdm_rate,
- HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ",
- str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_low = %u",
- htt_stats_buf->rx_active_dur_us_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_high = %u",
- htt_stats_buf->rx_active_dur_us_high);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u",
- htt_stats_buf->rx_11ax_ul_ofdma);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_mcs = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+ htt_stats_buf->txbf);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
+ "rx_legacy_cck_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate,
+ "rx_legacy_ofdm_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs,
+ "ul_ofdma_rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->ul_ofdma_rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_nss = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss,
+ "ul_ofdma_rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_bw = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u",
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
htt_stats_buf->ul_ofdma_rx_stbc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u",
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
htt_stats_buf->ul_ofdma_rx_ldpc);
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_non_data_ppdu,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_data_ppdu,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_ok,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_fail,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %s",
- str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ "rx_ulofdma_non_data_ppdu",
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu,
+ "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "rx_ul_fd_rssi: nss[%u] = %s", j, str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- len += HTT_DBG_OUT(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x",
- htt_stats_buf->per_chain_rssi_pkt_type);
+ len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+ htt_stats_buf->per_chain_rssi_pkt_type);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i,
- htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "rx_per_chain_rssi_in_dbm[%u] = %s ", j, str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3248,16 +3017,6 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rssi_chain[j]);
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rx_pilot_evm_db[j]);
-
- for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++)
- kfree(rx_gi[i]);
}
static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
@@ -3268,34 +3027,34 @@ static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u",
- htt_stats_buf->fw_reo_ring_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u",
- htt_stats_buf->fw_to_host_data_msdu_bcmc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u",
- htt_stats_buf->fw_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_remote_data_buf_recycle_cnt = %u",
- htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_remote_free_buf_indication_cnt = %u",
- htt_stats_buf->ofld_remote_free_buf_indication_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_buf_to_host_data_msdu_uc = %u",
- htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "reo_fw_ring_to_host_data_msdu_uc = %u",
- htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_sw_ring_reap = %u",
- htt_stats_buf->wbm_sw_ring_reap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u",
- htt_stats_buf->wbm_forward_to_host_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u",
- htt_stats_buf->wbm_target_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "target_refill_ring_recycle_cnt = %u",
- htt_stats_buf->target_refill_ring_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u\n",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u\n",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3314,17 +3073,13 @@ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char refill_ring_empty_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n");
- ARRAY_TO_STRING(refill_ring_empty_cnt,
- htt_stats_buf->refill_ring_empty_cnt,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_empty_cnt = %s\n",
- refill_ring_empty_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt,
+ "refill_ring_empty_cnt", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
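
For the variable-length (_V) TLVs in this file, the element count is derived from the TLV header rather than hard-coded: tag_len is the payload size in bytes, each counter is a u32, so tag_len >> 2 is the element count, and min_t() caps it at the host-side array bound so a malformed or oversized TLV from firmware cannot walk the print loop off the end of the structure. In sketch form:

    #include <linux/minmax.h>

    /* tag_len: TLV payload size in bytes, as parsed from the TLV
     * header; each stats counter is 4 bytes wide. */
    static u16 clamped_elem_count(u16 tag_len)
    {
            return min_t(u16, tag_len >> 2, HTT_RX_STATS_REFILL_MAX_RING);
    }
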
@@ -3344,17 +3099,13 @@ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char rxdma_err_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n");
- ARRAY_TO_STRING(rxdma_err_cnt,
- htt_stats_buf->rxdma_err,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdma_err = %s\n",
- rxdma_err_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3373,17 +3124,13 @@ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char reo_err_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n");
- ARRAY_TO_STRING(reo_err_cnt,
- htt_stats_buf->reo_err,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "reo_err = %s\n",
- reo_err_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3402,27 +3149,27 @@ htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sample_id = %u",
- htt_stats_buf->sample_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_max = %u",
- htt_stats_buf->total_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_avg = %u",
- htt_stats_buf->total_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_sample = %u",
- htt_stats_buf->total_sample);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_avg = %u",
- htt_stats_buf->non_zeros_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_sample = %u",
- htt_stats_buf->non_zeros_sample);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_max = %u",
- htt_stats_buf->last_non_zeros_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_min %u",
- htt_stats_buf->last_non_zeros_min);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_avg %u",
- htt_stats_buf->last_non_zeros_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_sample %u\n",
- htt_stats_buf->last_non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n",
+ htt_stats_buf->sample_id);
+ len += scnprintf(buf + len, buf_len - len, "total_max = %u\n",
+ htt_stats_buf->total_max);
+ len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n",
+ htt_stats_buf->total_avg);
+ len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n",
+ htt_stats_buf->total_sample);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n",
+ htt_stats_buf->non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n",
+ htt_stats_buf->non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n",
+ htt_stats_buf->last_non_zeros_max);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min %u\n",
+ htt_stats_buf->last_non_zeros_min);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg %u\n",
+ htt_stats_buf->last_non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample %u\n\n",
+ htt_stats_buf->last_non_zeros_sample);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3441,17 +3188,13 @@ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char refill_ring_num_refill[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n");
- ARRAY_TO_STRING(refill_ring_num_refill,
- htt_stats_buf->refill_ring_num_refill,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_num_refill = %s\n",
- refill_ring_num_refill);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill,
+ "refill_ring_num_refill", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3468,113 +3211,106 @@ static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_ring_mgmt_subtype[HTT_MAX_STRING_LEN] = {0};
- char fw_ring_ctrl_subtype[HTT_MAX_STRING_LEN] = {0};
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_recvd = %u",
- htt_stats_buf->ppdu_recvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u",
- htt_stats_buf->mpdu_cnt_fcs_ok);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u",
- htt_stats_buf->mpdu_cnt_fcs_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_msdu_cnt = %u",
- htt_stats_buf->tcp_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u",
- htt_stats_buf->tcp_ack_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "udp_msdu_cnt = %u",
- htt_stats_buf->udp_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "other_msdu_cnt = %u",
- htt_stats_buf->other_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u",
- htt_stats_buf->fw_ring_mpdu_ind);
-
- ARRAY_TO_STRING(fw_ring_mgmt_subtype,
- htt_stats_buf->fw_ring_mgmt_subtype,
- HTT_STATS_SUBTYPE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ",
- fw_ring_mgmt_subtype);
-
- ARRAY_TO_STRING(fw_ring_ctrl_subtype,
- htt_stats_buf->fw_ring_ctrl_subtype,
- HTT_STATS_SUBTYPE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ",
- fw_ring_ctrl_subtype);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u",
- htt_stats_buf->fw_ring_mcast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u",
- htt_stats_buf->fw_ring_bcast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u",
- htt_stats_buf->fw_ring_ucast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u",
- htt_stats_buf->fw_ring_null_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u",
- htt_stats_buf->fw_ring_mpdu_drop);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u",
- htt_stats_buf->ofld_local_data_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_local_data_buf_recycle_cnt = %u",
- htt_stats_buf->ofld_local_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u",
- htt_stats_buf->drx_local_data_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "drx_local_data_buf_recycle_cnt = %u",
- htt_stats_buf->drx_local_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_ind_cnt = %u",
- htt_stats_buf->local_nondata_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u",
- htt_stats_buf->local_nondata_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_status_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_status_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_link_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_link_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->host_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->host_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "mon_status_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_status_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_status_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_desc_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_desc_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u",
- htt_stats_buf->mon_dest_ring_update_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u",
- htt_stats_buf->mon_dest_ring_full_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_cnt = %u",
- htt_stats_buf->rx_suspend_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u",
- htt_stats_buf->rx_suspend_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_cnt = %u",
- htt_stats_buf->rx_resume_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_fail_cnt = %u",
- htt_stats_buf->rx_resume_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_switch_cnt = %u",
- htt_stats_buf->rx_ring_switch_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_restore_cnt = %u",
- htt_stats_buf->rx_ring_restore_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_flush_cnt = %u",
- htt_stats_buf->rx_flush_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n",
- htt_stats_buf->rx_recovery_reset_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
+ htt_stats_buf->ppdu_recvd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n",
+ htt_stats_buf->udp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n",
+ htt_stats_buf->other_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype,
+ "fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype,
+ "fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
+ htt_stats_buf->rx_suspend_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
+ htt_stats_buf->rx_resume_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ htt_stats_buf->rx_flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3592,16 +3328,12 @@ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_ring_mpdu_err[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n");
- ARRAY_TO_STRING(fw_ring_mpdu_err,
- htt_stats_buf->fw_ring_mpdu_err,
- HTT_RX_STATS_RXDMA_MAX_ERR);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_err = %s\n",
- fw_ring_mpdu_err);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err,
+ "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3620,15 +3352,12 @@ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_mpdu_drop[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n");
- ARRAY_TO_STRING(fw_mpdu_drop,
- htt_stats_buf->fw_mpdu_drop,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_mpdu_drop = %s\n", fw_mpdu_drop);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3646,18 +3375,15 @@ htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char phy_errs[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id__word = %u",
- htt_stats_buf->mac_id__word);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_phy_err_nct = %u",
- htt_stats_buf->total_phy_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n",
+ htt_stats_buf->mac_id__word);
+ len += scnprintf(buf + len, buf_len - len, "total_phy_err_nct = %u\n",
+ htt_stats_buf->total_phy_err_cnt);
- ARRAY_TO_STRING(phy_errs,
- htt_stats_buf->phy_err,
- HTT_STATS_PHY_ERR_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs",
+ HTT_STATS_PHY_ERR_MAX, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3676,20 +3402,20 @@ htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "chan_num = %u",
- htt_stats_buf->chan_num);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u",
- htt_stats_buf->num_records);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x",
- htt_stats_buf->valid_cca_counters_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "collection_interval = %u\n",
- htt_stats_buf->collection_interval);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|");
+ len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ htt_stats_buf->chan_num);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
+ htt_stats_buf->collection_interval);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3708,16 +3434,16 @@ htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|",
- htt_stats_buf->tx_frame_usec,
- htt_stats_buf->rx_frame_usec,
- htt_stats_buf->rx_clear_usec,
- htt_stats_buf->my_rx_frame_usec,
- htt_stats_buf->usec_cnt,
- htt_stats_buf->med_rx_idle_usec,
- htt_stats_buf->med_tx_idle_global_usec,
- htt_stats_buf->cca_obss_usec);
+ len += scnprintf(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3735,32 +3461,32 @@ static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_unpause_ppdu_id = %u",
- htt_stats_buf->last_unpause_ppdu_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u",
- htt_stats_buf->hwsch_unpause_wait_tqm_write);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u",
- htt_stats_buf->hwsch_dummy_tlv_skipped);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "hwsch_misaligned_offset_received = %u",
- htt_stats_buf->hwsch_misaligned_offset_received);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_reset_count = %u",
- htt_stats_buf->hwsch_reset_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dev_reset_war = %u",
- htt_stats_buf->hwsch_dev_reset_war);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_delayed_pause = %u",
- htt_stats_buf->hwsch_delayed_pause);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u",
- htt_stats_buf->hwsch_long_delayed_pause);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u",
- htt_stats_buf->sch_rx_ppdu_no_response);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_selfgen_response = %u",
- htt_stats_buf->sch_selfgen_response);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n",
- htt_stats_buf->sch_rx_sifs_resp_trigger);
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ htt_stats_buf->hwsch_reset_count);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ htt_stats_buf->sch_selfgen_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3779,11 +3505,11 @@ htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- htt_stats_buf->pdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sessions = %u\n",
- htt_stats_buf->num_sessions);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n",
+ htt_stats_buf->num_sessions);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3802,27 +3528,33 @@ htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
- htt_stats_buf->vdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x",
- htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF00) >> 8,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF0000) >> 16,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF000000) >> 24,
- (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF),
- (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flow_id_flags = %u",
- htt_stats_buf->flow_id_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dialog_id = %u",
- htt_stats_buf->dialog_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_dura_us = %u",
- htt_stats_buf->wake_dura_us);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_intvl_us = %u",
- htt_stats_buf->wake_intvl_us);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sp_offset_us = %u\n",
- htt_stats_buf->sp_offset_us);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ htt_stats_buf->vdev_id);
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->peer_mac.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->peer_mac.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n",
+ htt_stats_buf->flow_id_flags);
+ len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n",
+ htt_stats_buf->dialog_id);
+ len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n",
+ htt_stats_buf->wake_dura_us);
+ len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n",
+ htt_stats_buf->wake_intvl_us);
+ len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n",
+ htt_stats_buf->sp_offset_us);
if (len >= buf_len)
buf[buf_len - 1] = 0;
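
The rewritten peer_mac formatting above swaps the open-coded masks and shifts for FIELD_GET() with the HTT_MAC_ADDR_L32_*/H16_* GENMASK definitions added to dp.h later in this patch. One side effect explains the %02lx conversions in the format string: FIELD_GET() expands to an unsigned long expression. A minimal sketch of the extraction (helper name illustrative, not part of the patch):

    #include <linux/bitfield.h>	/* FIELD_GET() */

    static void example_unpack_mac(u32 mac_addr_l32, u32 mac_addr_h16, u8 mac[6])
    {
    	mac[0] = FIELD_GET(HTT_MAC_ADDR_L32_0, mac_addr_l32);	/* bits 7:0 */
    	mac[1] = FIELD_GET(HTT_MAC_ADDR_L32_1, mac_addr_l32);	/* bits 15:8 */
    	mac[2] = FIELD_GET(HTT_MAC_ADDR_L32_2, mac_addr_l32);	/* bits 23:16 */
    	mac[3] = FIELD_GET(HTT_MAC_ADDR_L32_3, mac_addr_l32);	/* bits 31:24 */
    	mac[4] = FIELD_GET(HTT_MAC_ADDR_H16_0, mac_addr_h16);	/* bits 7:0 */
    	mac[5] = FIELD_GET(HTT_MAC_ADDR_H16_1, mac_addr_h16);	/* bits 15:8 */
    }
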
@@ -3841,21 +3573,21 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx success PPDU = %u",
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
htt_stats_buf->num_non_srg_opportunities);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunities = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n",
htt_stats_buf->num_srg_opportunities);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
htt_stats_buf->num_srg_ppdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG success PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n",
htt_stats_buf->num_srg_ppdu_success);
if (len >= buf_len)
@@ -3878,25 +3610,25 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- htt_stats_buf->pdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_head_idx = %u",
- htt_stats_buf->current_head_idx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_tail_idx = %u",
- htt_stats_buf->current_tail_idx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_htt_msgs_sent = %u",
- htt_stats_buf->num_htt_msgs_sent);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "backpressure_time_ms = %u",
- htt_stats_buf->backpressure_time_ms);
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "current_head_idx = %u\n",
+ htt_stats_buf->current_head_idx);
+ len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n",
+ htt_stats_buf->current_tail_idx);
+ len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n",
+ htt_stats_buf->num_htt_msgs_sent);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_time_ms = %u\n",
+ htt_stats_buf->backpressure_time_ms);
for (i = 0; i < 5; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "backpressure_hist_%u = %u",
- i + 1, htt_stats_buf->backpressure_hist[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_hist_%u = %u\n",
+ i + 1, htt_stats_buf->backpressure_hist[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "============================");
+ len += scnprintf(buf + len, buf_len - len,
+ "============================\n");
if (len >= buf_len) {
buf[buf_len - 1] = 0;
@@ -3907,6 +3639,334 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
}
}
+static inline
+void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs =");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
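
Editor's note on the len-- after each MCS/NSS/BW loop above: every iteration emits "i:val," and the decrement backs len up over the final comma so the next scnprintf() overwrites it. That is safe here because the loop bounds are nonzero compile-time constants, so at least one pair was written; it would misbehave if the buffer were already full and scnprintf() had returned 0. The pattern in isolation:

    	len += scnprintf(buf + len, buf_len - len, "vals = ");
    	for (i = 0; i < n; i++)			/* assumes n > 0 */
    		len += scnprintf(buf + len, buf_len - len,
    				 "%d:%u,", i, vals[i]);
    	len--;					/* drop the trailing ',' */
    	len += scnprintf(buf + len, buf_len - len, "\n");
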
+
+static inline
+void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_steer_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_ol_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_prefetch_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_force_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ htt_stats_buf->rx_ofdma_timing_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ htt_stats_buf->rx_cck_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ htt_stats_buf->mactx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ htt_stats_buf->macrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ htt_stats_buf->phytx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_defer_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_lstf_event_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_non_legacy_cnt);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_pkt_crc_pass_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "per_blk_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->per_blk_err_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ota_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_ota_err_cnt[i]);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n",
+ i, htt_stats_buf->nf_chain[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n",
+ htt_stats_buf->false_radar_cnt);
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ htt_stats_buf->radar_cs_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n",
+ htt_stats_buf->ani_level);
+ len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n",
+ htt_stats_buf->fw_run_time);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+ const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = {
+ "assoc_req", "assoc_resp",
+ "reassoc_req", "reassoc_resp",
+ "probe_req", "probe_resp",
+ "timing_advertisement", "reserved",
+ "beacon", "atim", "disassoc",
+ "auth", "deauth", "action", "action_no_ack"};
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1],
+ htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3],
+ htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_rx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_rx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -4258,6 +4318,30 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
break;
+ case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+ htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG:
+ htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG:
+ htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG:
+ htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG:
+ htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ htt_print_phy_counters_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ htt_print_phy_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
+ htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
+ break;
default:
break;
}
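
Each tag added above follows the same three-part pattern: a value in the tlv tag enum (matching the firmware-assigned HTT number, which is why the enum has gaps), a struct mirroring the firmware payload, and a case routing the buffer to a print helper. A hypothetical example of the wiring, not a real tag:

    /* hypothetical tag and payload, for illustration only */
    	HTT_STATS_EXAMPLE_TAG = 123,	/* firmware-assigned value, in enum htt_tlv_tag_t */

    struct htt_example_tlv {
    	u32 example_cnt;
    } __packed;

    /* dispatch, inside ath11k_dbg_htt_ext_stats_parse(): */
    	case HTT_STATS_EXAMPLE_TAG:
    		htt_print_example_tlv(tag_buf, stats_req);
    		break;
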
@@ -4345,8 +4429,7 @@ static ssize_t ath11k_write_htt_stats_type(struct file *file,
if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
return -E2BIG;
- if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
- type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO)
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
return -EPERM;
ar->debug.htt_stats.type = type;
@@ -4407,6 +4490,15 @@ static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
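
The PEER_CTRL_PATH_TXRX_STATS request above carries the target peer's MAC in two HTT config words, the inverse of the FIELD_GET() unpacking on the receive side. A sketch of the packing as a standalone helper (name illustrative, not part of the patch):

    #include <linux/bitfield.h>	/* FIELD_PREP() */

    static void example_pack_mac(const u8 *mac, u32 *cfg1, u32 *cfg2)
    {
    	*cfg1 = FIELD_PREP(GENMASK(7, 0), mac[0]) |
    		FIELD_PREP(GENMASK(15, 8), mac[1]) |
    		FIELD_PREP(GENMASK(23, 16), mac[2]) |
    		FIELD_PREP(GENMASK(31, 24), mac[3]);
    	*cfg2 = FIELD_PREP(GENMASK(7, 0), mac[4]) |
    		FIELD_PREP(GENMASK(15, 8), mac[5]);
    }
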
default:
break;
}
@@ -4464,7 +4556,9 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
u8 type = ar->debug.htt_stats.type;
int ret;
- if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS)
return -EPERM;
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index d428f52003a4..dc210c54d131 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -102,6 +102,14 @@ enum htt_tlv_tag_t {
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
HTT_STATS_HW_WAR_TAG = 89,
HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90,
+ HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG = 101,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
+ HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG = 113,
+ HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG = 114,
+ HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG = 115,
+ HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG = 116,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
HTT_STATS_MAX_TAG,
};
@@ -137,6 +145,8 @@ struct htt_stats_string_tlv {
u32 data[0]; /* Can be variable length */
} __packed;
+#define HTT_STATS_MAC_ID GENMASK(7, 0)
+
/* == TX PDEV STATS == */
struct htt_tx_pdev_stats_cmn_tlv {
u32 mac_id__word;
@@ -290,6 +300,10 @@ struct htt_hw_stats_whal_tx_tlv {
};
/* ============ PEER STATS ============ */
+#define HTT_MSDU_FLOW_STATS_TX_FLOW_NO GENMASK(15, 0)
+#define HTT_MSDU_FLOW_STATS_TID_NUM GENMASK(19, 16)
+#define HTT_MSDU_FLOW_STATS_DROP_RULE BIT(20)
+
struct htt_msdu_flow_stats_tlv {
u32 last_update_timestamp;
u32 last_add_timestamp;
@@ -306,6 +320,11 @@ struct htt_msdu_flow_stats_tlv {
#define MAX_HTT_TID_NAME 8
+#define HTT_TX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
/* Tidq stats */
struct htt_tx_tid_stats_tlv {
/* Stored as little endian */
@@ -326,6 +345,11 @@ struct htt_tx_tid_stats_tlv {
u32 tid_tx_airtime;
};
+#define HTT_TX_TID_STATS_V1_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_V1_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
/* Tidq stats */
struct htt_tx_tid_stats_v1_tlv {
/* Stored as little endian */
@@ -348,6 +372,9 @@ struct htt_tx_tid_stats_v1_tlv {
u32 sendn_frms_allowed;
};
+#define HTT_RX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_RX_TID_STATS_TID_NUM GENMASK(31, 16)
+
struct htt_rx_tid_stats_tlv {
u32 sw_peer_id__tid_num;
u8 tid_name[MAX_HTT_TID_NAME];
@@ -386,6 +413,10 @@ struct htt_peer_stats_cmn_tlv {
u32 inactive_time;
};
+#define HTT_PEER_DETAILS_VDEV_ID GENMASK(7, 0)
+#define HTT_PEER_DETAILS_PDEV_ID GENMASK(15, 8)
+#define HTT_PEER_DETAILS_AST_IDX GENMASK(31, 16)
+
struct htt_peer_details_tlv {
u32 peer_type;
u32 sw_peer_id;
@@ -510,6 +541,9 @@ struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
u32 mu_mimo_ampdu_underrun_usr;
};
+#define HTT_TX_HWQ_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_HWQ_STATS_HWQ_ID GENMASK(15, 8)
+
struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
u32 mac_id__hwq_id__word;
};
@@ -789,6 +823,9 @@ struct htt_sched_txq_sched_ineligibility_tlv_v {
u32 sched_ineligibility[0];
};
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
struct htt_tx_pdev_stats_sched_per_txq_tlv {
u32 mac_id__txq_id__word;
u32 sched_policy;
@@ -910,6 +947,9 @@ struct htt_tx_tqm_error_stats_tlv {
};
/* == TQM CMDQ stats == */
+#define HTT_TX_TQM_CMDQ_STATUS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID GENMASK(15, 8)
+
struct htt_tx_tqm_cmdq_status_tlv {
u32 mac_id__cmdq_id__word;
u32 sync_cmd;
@@ -1055,6 +1095,15 @@ struct htt_tx_de_cmn_stats_tlv {
#define HTT_STATS_LOW_WM_BINS 5
#define HTT_STATS_HIGH_WM_BINS 5
+#define HTT_RING_IF_STATS_NUM_ELEMS GENMASK(15, 0)
+#define HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_SHADOW_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_SHADOW_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_LWM_THRESH GENMASK(15, 0)
+#define HTT_RING_IF_STATS_HWM_THRESH GENMASK(31, 16)
+
struct htt_ring_if_stats_tlv {
u32 base_addr; /* DWORD aligned base memory address of the ring */
u32 elem_size;
@@ -1117,6 +1166,19 @@ struct htt_sfm_cmn_tlv {
};
/* == SRNG STATS == */
+#define HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_SRING_STATS_RING_ID GENMASK(15, 8)
+#define HTT_SRING_STATS_ARENA GENMASK(23, 16)
+#define HTT_SRING_STATS_EP BIT(24)
+#define HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
+#define HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
+#define HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
+#define HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
+#define HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
+#define HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
+#define HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
+#define HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
+
struct htt_sring_stats_tlv {
u32 mac_id__ring_id__arena__ep;
u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
@@ -1696,6 +1758,170 @@ struct htt_ring_backpressure_stats_tlv {
u32 backpressure_hist[5];
};
+#define HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+
+struct htt_pdev_txrate_txbf_stats_tlv {
+ /* SU TxBF TX MCS stats */
+ u32 tx_su_txbf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Implicit BF TX MCS stats */
+ u32 tx_su_ibf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Open loop TX MCS stats */
+ u32 tx_su_ol_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* SU TxBF TX NSS stats */
+ u32 tx_su_txbf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Implicit BF TX NSS stats */
+ u32 tx_su_ibf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Open loop TX NSS stats */
+ u32 tx_su_ol_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* SU TxBF TX BW stats */
+ u32 tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Implicit BF TX BW stats */
+ u32 tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Open loop TX BW stats */
+ u32 tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+};
+
+struct htt_txbf_ofdma_ndpa_stats_tlv {
+ /* 11AX HE OFDMA NDPA frame queued to the HW */
+ u32 ax_ofdma_ndpa_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame sent over the air */
+ u32 ax_ofdma_ndpa_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame flushed by HW */
+ u32 ax_ofdma_ndpa_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame completed with error(s) */
+ u32 ax_ofdma_ndpa_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_ndp_stats_tlv {
+ /* 11AX HE OFDMA NDP frame queued to the HW */
+ u32 ax_ofdma_ndp_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame sent over the air */
+ u32 ax_ofdma_ndp_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame flushed by HW */
+ u32 ax_ofdma_ndp_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame completed with error(s) */
+ u32 ax_ofdma_ndp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_brp_stats_tlv {
+ /* 11AX HE OFDMA MU BRPOLL frame queued to the HW */
+ u32 ax_ofdma_brpoll_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame sent over the air */
+ u32 ax_ofdma_brpoll_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame flushed by HW */
+ u32 ax_ofdma_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame completed with error(s) */
+ u32 ax_ofdma_brp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* Number of CBF(s) received when 11AX HE OFDMA MU BRPOLL frame
+ * completed with error(s).
+ */
+ u32 ax_ofdma_brp_err_num_cbf_rcvd[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS + 1];
+};
+
+struct htt_txbf_ofdma_steer_stats_tlv {
+ /* 11AX HE OFDMA PPDUs that were sent over the air with steering (TXBF + OFDMA) */
+ u32 ax_ofdma_num_ppdu_steer[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA PPDUs that were sent over the air in open loop */
+ u32 ax_ofdma_num_ppdu_ol[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which CBF prefetch was
+ * initiated to PHY HW during TX.
+ */
+ u32 ax_ofdma_num_usrs_prefetch[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was initiated during TX */
+ u32 ax_ofdma_num_usrs_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was forced during TX */
+ u32 ax_ofdma_num_usrs_force_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+#define HTT_MAX_RX_PKT_CNT 8
+#define HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define HTT_MAX_PER_BLK_ERR_CNT 20
+#define HTT_MAX_RX_OTA_ERR_CNT 14
+#define HTT_STATS_MAX_CHAINS 8
+#define ATH11K_STATS_MGMT_FRM_TYPE_MAX 16
+
+struct htt_phy_counters_tlv {
+ /* number of RXTD OFDMA OTA error counts except power surge and drop */
+ u32 rx_ofdma_timing_err_cnt;
+ /* rx_cck_fail_cnt:
+ * number of cck errors due to rx reception failures caused by
+ * cck timing errors
+ */
+ u32 rx_cck_fail_cnt;
+ /* number of times tx abort initiated by mac */
+ u32 mactx_abort_cnt;
+ /* number of times rx abort initiated by mac */
+ u32 macrx_abort_cnt;
+ /* number of times tx abort initiated by phy */
+ u32 phytx_abort_cnt;
+ /* number of times rx abort initiated by phy */
+ u32 phyrx_abort_cnt;
+ /* number of deferred rx aborts initiated by phy */
+ u32 phyrx_defer_abort_cnt;
+ /* number of sizing events generated at LSTF */
+ u32 rx_gain_adj_lstf_event_cnt;
+ /* number of sizing events generated at non-legacy LTF */
+ u32 rx_gain_adj_non_legacy_cnt;
+ /* rx_pkt_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_cnt[HTT_MAX_RX_PKT_CNT];
+ /* rx_pkt_crc_pass_cnt -
+ * Received EOP (end-of-packet) count with CRC pass per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_crc_pass_cnt[HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ /* per_blk_err_cnt -
+ * Error count per error source;
+ * [0] = unknown; [1] = LSIG; [2] = HTSIG; [3] = VHTSIG; [4] = HESIG;
+ * [5] = RXTD_OTA; [6] = RXTD_FATAL; [7] = DEMF; [8] = ROBE;
+ * [9] = PMI; [10] = TXFD; [11] = TXTD; [12] = PHYRF
+ * [13-19]=RSVD
+ */
+ u32 per_blk_err_cnt[HTT_MAX_PER_BLK_ERR_CNT];
+ /* rx_ota_err_cnt -
+ * RXTD OTA (over-the-air) error count per error reason;
+ * [0] = voting fail; [1] = weak det fail; [2] = strong sig fail;
+ * [3] = cck fail; [4] = power surge; [5] = power drop;
+ * [6] = btcf timing timeout error; [7] = btcf packet detect error;
+ * [8] = coarse timing timeout error
+ * [9-13]=RSVD
+ */
+ u32 rx_ota_err_cnt[HTT_MAX_RX_OTA_ERR_CNT];
+};
+
+struct htt_phy_stats_tlv {
+ /* per chain hw noise floor values in dBm */
+ s32 nf_chain[HTT_STATS_MAX_CHAINS];
+ /* number of false radars detected */
+ u32 false_radar_cnt;
+ /* number of channel switches happened due to radar detection */
+ u32 radar_cs_cnt;
+ /* ani_level -
+ * ANI (noise interference) level of the channel;
+ * desense levels range from -5 to 15 dB, with higher values
+ * indicating more noise interference.
+ */
+ s32 ani_level;
+ /* running time in minutes since FW boot */
+ u32 fw_run_time;
+};
+
+struct htt_peer_ctrl_path_txrx_stats_tlv {
+ /* peer mac address */
+ u8 peer_mac_addr[ETH_ALEN];
+ u8 rsvd[2];
+ /* Number of tx mgmt frames per subtype at peer level */
+ u32 peer_tx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+ /* Number of rx mgmt frames per subtype at peer level */
+ u32 peer_rx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+};
+
#ifdef CONFIG_ATH11K_DEBUGFS
void ath11k_debugfs_htt_stats_init(struct ath11k *ar);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 270c0edbb10f..fecd9718f5ce 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -419,15 +419,21 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct debug_htt_stats_req *stats_req;
+ int type = ar->debug.htt_stats.type;
int ret;
+ if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO &&
+ type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
if (!stats_req)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats.stats_req = stats_req;
- stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
+ stats_req->type = type;
memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
ret = ath11k_debugfs_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index ee768ccce46e..d6267bfa0264 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -195,6 +195,7 @@ struct ath11k_pdev_dp {
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
@@ -1592,6 +1593,13 @@ struct ath11k_htt_extd_stats_msg {
u8 data[0];
} __packed;
+#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
struct htt_mac_addr {
u32 mac_addr_l32;
u32 mac_addr_h16;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 9a224817630a..75f6d55dca46 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -142,6 +142,18 @@ static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
return errmap;
}
+static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *rx_attention;
+ u32 errmap;
+
+ rx_attention = ath11k_dp_rx_get_attention(ab, desc);
+ errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
+}
+
static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
@@ -270,6 +282,18 @@ static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
__le32_to_cpu(attn->info1)));
}
+static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
+}
+
+static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
+}
+
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -2156,6 +2180,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
{
u8 *first_hdr;
u8 decap;
+ struct ethhdr *ehdr;
first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
@@ -2170,9 +2195,22 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
- /* TODO undecap support for middle/last msdu's of amsdu */
- ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
- enctype, status);
+ ehdr = (struct ethhdr *)msdu->data;
+
+ /* mac80211 allows fast path only for authorized STA */
+ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+ ATH11K_SKB_RXCB(msdu)->is_eapol = true;
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ }
+
+ /* PN for mcast packets will be validated in mac80211;
+ * remove eth header and add 802.11 header.
+ */
+ if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
break;
case DP_RX_DECAP_TYPE_8023:
/* TODO: Handle undecap for these formats */
@@ -2180,35 +2218,62 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
}
}
+static struct ath11k_peer *
+ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath11k_peer *peer = NULL;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rxcb->peer_id)
+ peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
+
+ if (peer)
+ return peer;
+
+ if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
+ return NULL;
+
+ peer = ath11k_peer_find_by_addr(ab,
+ ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
+ return peer;
+}
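
The new helper tries the peer_id cached from the REO descriptor metadata first and only falls back to an address lookup on the transmitter address (addr2) from the MPDU start tag. The lockdep assertion documents the calling convention: the returned peer is only valid while base_lock is held, so callers copy out what they need before unlocking. A sketch of the expected usage:

    	spin_lock_bh(&ab->base_lock);
    	peer = ath11k_dp_rx_h_find_peer(ab, msdu);
    	if (peer)
    		enctype = peer->sec_type;	/* copy fields while locked */
    	spin_unlock_bh(&ab->base_lock);
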
+
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
- bool fill_crypto_hdr, mcast;
+ bool fill_crypto_hdr;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
+ struct ath11k_skb_rxcb *rxcb;
struct ieee80211_hdr *hdr;
struct ath11k_peer *peer;
struct rx_attention *rx_attention;
u32 err_bitmap;
- hdr = (struct ieee80211_hdr *)msdu->data;
-
/* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+ rxcb->is_mcbc = fill_crypto_hdr;
- mcast = is_multicast_ether_addr(hdr->addr1);
- fill_crypto_hdr = mcast;
+ if (rxcb->is_mcbc) {
+ rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ }
spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer) {
- if (mcast)
+ if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
enctype = peer->sec_type;
} else {
- enctype = HAL_ENCRYPT_TYPE_OPEN;
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
}
spin_unlock_bh(&ar->ab->base_lock);
@@ -2247,8 +2312,11 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
if (!is_decrypted || fill_crypto_hdr)
return;
- hdr = (void *)msdu->data;
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
}
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
@@ -2337,8 +2405,10 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH11K_MIN_6G_FREQ &&
+ center_freq <= ATH11K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2356,57 +2426,56 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_desc, sizeof(struct hal_rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
-static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
- size_t size)
-{
- u8 *qc;
- int tid;
-
- if (!ieee80211_is_data_qos(hdr->frame_control))
- return "";
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- snprintf(out, size, "tid %d", tid);
-
- return out;
-}
-
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
- struct sk_buff *msdu)
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
- struct ieee80211_rx_status *status;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_rx_status *rx_status;
struct ieee80211_radiotap_he *he = NULL;
- char tid[32];
+ struct ieee80211_sta *pubsta = NULL;
+ struct ath11k_peer *peer;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol = rxcb->is_eapol;
- status = IEEE80211_SKB_RXCB(msdu);
- if (status->encoding == RX_ENC_HE) {
+ if (status->encoding == RX_ENC_HE &&
+ !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
memcpy(he, &known, sizeof(known));
status->flag |= RX_FLAG_RADIOTAP_HE;
}
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer && peer->sta)
+ pubsta = peer->sta;
+ spin_unlock_bh(&ar->ab->base_lock);
+
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
- ieee80211_get_SA(hdr),
- ath11k_print_get_tid(hdr, tid, sizeof(tid)),
- is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
- "mcast" : "ucast",
- (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ is_mcbc ? "mcast" : "ucast",
+ rxcb->seq_no,
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
@@ -2426,22 +2495,32 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
/* TODO: trace rx packet */
- ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
+ /* PN for multicast packets is not validated in HW,
+ * so skip the 802.3 rx path.
+ * Also, fast_rx expects the STA to be authorized, hence
+ * EAPOL packets are sent via the slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct sk_buff *msdu,
- struct sk_buff_head *msdu_list)
+ struct sk_buff_head *msdu_list,
+ struct ieee80211_rx_status *rx_status)
{
struct ath11k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
struct rx_attention *rx_attention;
- struct ieee80211_rx_status rx_status = {0};
- struct ieee80211_rx_status *status;
struct ath11k_skb_rxcb *rxcb;
- struct ieee80211_hdr *hdr;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
u8 *hdr_status;
@@ -2458,6 +2537,12 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
}
rx_desc = (struct hal_rx_desc *)msdu->data;
+ if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
+ ath11k_warn(ar->ab, "msdu len not valid\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
lrx_desc = (struct hal_rx_desc *)last_buf->data;
rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
@@ -2497,19 +2582,11 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
}
}
- hdr = (struct ieee80211_hdr *)msdu->data;
-
- /* Process only data frames */
- if (!ieee80211_is_data(hdr->frame_control))
- return -EINVAL;
-
- ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
- ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
- rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+ rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
- status = IEEE80211_SKB_RXCB(msdu);
- *status = rx_status;
return 0;
free_out:
@@ -2524,6 +2601,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath11k *ar;
+ struct ieee80211_rx_status rx_status = {0};
u8 mac_id;
int ret;
@@ -2546,7 +2624,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
continue;
}
- ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
+ ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"Unable to process msdu %d", ret);
@@ -2554,7 +2632,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
continue;
}
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
(*quota)--;
}
@@ -2636,10 +2714,14 @@ try_again:
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->mac_id = mac_id;
+ rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
+ desc.rx_mpdu_info.meta_data);
+ rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
+ desc.rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
desc.info0);
+ rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list, msdu);
if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
@@ -2969,6 +3051,8 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
int num_buffs_reaped = 0;
+ u32 rx_buf_sz = 0;
+ u16 log_type = 0;
__skb_queue_head_init(&skb_list);
@@ -2981,8 +3065,16 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
- if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
- trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ }
+
+ if (log_type)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
@@ -3010,7 +3102,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
- trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
@@ -3310,7 +3402,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, paddr))
return -ENOMEM;
@@ -3375,7 +3467,7 @@ err_free_idr:
spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3941,7 +4033,6 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
- struct ieee80211_rx_status *status;
bool drop = true;
switch (rxcb->err_rel_src) {
@@ -3961,10 +4052,7 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
return;
}
- status = IEEE80211_SKB_RXCB(msdu);
- *status = rxs;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
@@ -4848,7 +4936,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
- struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
+ struct ieee80211_rx_status *rxs = &dp->rx_status;
mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
tail_msdu, rxs);
@@ -4874,10 +4962,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
- status = IEEE80211_SKB_RXCB(mon_skb);
- *status = *rxs;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
@@ -5029,7 +5114,7 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
- if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 8bba5234f81f..70d2cf010a68 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -78,7 +78,7 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
}
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
- struct sk_buff *skb)
+ struct ath11k_sta *arsta, struct sk_buff *skb)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
@@ -145,7 +145,15 @@ tcl_ring_sel:
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
- ti.meta_data_flags = arvif->tcl_metadata;
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr3) && arsta &&
+ arsta->use_4addr_set) {
+ ti.meta_data_flags = arsta->tcl_metadata;
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+ } else {
+ ti.meta_data_flags = arvif->tcl_metadata;
+ }
if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
@@ -614,6 +622,9 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
struct hal_srng *cmd_ring;
int cmd_num;
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
@@ -1068,12 +1079,16 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
- if (!reset)
+ if (!reset) {
tlv_filter.rx_filter =
HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
- else
+ } else {
tlv_filter = ath11k_mac_mon_status_filter_default;
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+ }
+
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
dp->mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index f8a9f9c8e444..698b907b878d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -17,7 +17,7 @@ struct ath11k_dp_htt_wbm_tx_status {
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
- struct sk_buff *skb);
+ struct ath11k_sta *arsta, struct sk_buff *skb);
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
enum hal_reo_cmd_type type,
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index d54ec6aa6281..00b595b84939 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -496,6 +496,8 @@ struct hal_tlv_hdr {
#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+
struct rx_mpdu_desc {
u32 info0; /* %RX_MPDU_DESC_INFO */
u32 meta_data;
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index d9596903b0a5..7a343db1dde8 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -97,6 +97,7 @@ static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
config->num_multicast_filter_entries = 0x20;
config->num_wow_filters = 0x16;
config->num_keep_alive_pattern = 0;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
@@ -197,6 +198,7 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
config->peer_map_unmap_v2_support = 1;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
@@ -372,6 +374,17 @@ static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16
desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info);
}
+static bool ath11k_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.mpdu_start.addr2;
+}
+
static
struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
{
@@ -543,6 +556,17 @@ static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.qcn9074.msdu_payload[0];
}
+static bool ath11k_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.mpdu_start.addr2;
+}
+
static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
@@ -703,6 +727,17 @@ static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.wcn6855.msdu_payload[0];
}
+static bool ath11k_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.mpdu_start.addr2;
+}
+
static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
@@ -799,6 +834,8 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -835,6 +872,8 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -871,6 +910,8 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -907,6 +948,8 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops wcn6855_ops = {
@@ -943,6 +986,8 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
};
#define ATH11K_TX_RING_MASK_0 0x1
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 62f5978b3005..1535075eed03 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -128,7 +128,7 @@ struct ath11k_hw_params {
struct {
const char *dir;
size_t board_size;
- size_t cal_size;
+ size_t cal_offset;
} fw;
const struct ath11k_hw_ops *hw_ops;
@@ -153,7 +153,14 @@ struct ath11k_hw_params {
bool vdev_start_delay;
bool htt_peer_map_v2;
bool tcl_0_only;
- u8 spectral_fft_sz;
+
+ struct {
+ u8 fft_sz;
+ u8 fft_pad_sz;
+ u8 summary_pad_sz;
+ u8 fft_hdr_len;
+ u16 max_fft_bins;
+ } spectral;
u16 interface_modes;
bool supports_monitor;
@@ -202,6 +209,8 @@ struct ath11k_hw_ops {
u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
void (*reo_setup)(struct ath11k_base *ab);
u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
+ bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+ u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
};
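
The two new ops extend the per-chip vtable so common code can fetch the transmitter address from chip-specific rx descriptor layouts. A hypothetical caller (the helper name is illustrative, not from the patch) would look like:

/* Sketch: common code stays layout-agnostic via the ops table. */
static const u8 *example_get_addr2(struct ath11k_base *ab,
				   struct hal_rx_desc *desc)
{
	if (!ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc))
		return NULL;

	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
}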
extern const struct ath11k_hw_ops ipq8074_ops;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e9b3689331ec..da850f4b2919 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -150,6 +150,9 @@ static const struct ieee80211_channel ath11k_6ghz_channels[] = {
CHAN6G(225, 7075, 0),
CHAN6G(229, 7095, 0),
CHAN6G(233, 7115, 0),
+
+ /* new addition in IEEE Std 802.11ax-2021 */
+ CHAN6G(2, 5935, 0),
};
static struct ieee80211_rate ath11k_legacy_rates[] = {
@@ -354,6 +357,18 @@ ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
return 1;
}
+static u32
+ath11k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+ if (he_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
{
/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
@@ -488,7 +503,8 @@ struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar) {
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@@ -715,32 +731,386 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
ar->num_stations = 0;
}
-static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
{
- int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
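
ath11k_mac_vdev_setup_sync() is moved up so the new monitor helpers can share it. The pattern it supports is: reinit the completion before firing the WMI command, then block with a timeout until the event handler completes it. A minimal standalone sketch of the same pattern, with generic names:

#include <linux/completion.h>
#include <linux/jiffies.h>

struct example_ctx {
	struct completion done;
	int status;	/* filled in by the event handler */
};

static int example_cmd_sync(struct example_ctx *ctx,
			    int (*send_cmd)(struct example_ctx *))
{
	int ret;

	reinit_completion(&ctx->done);	/* before the command, never after */

	ret = send_cmd(ctx);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&ctx->done, 5 * HZ))
		return -ETIMEDOUT;

	return ctx->status ? -EINVAL : 0;
}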
+
+static void
+ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *channel;
+ struct wmi_vdev_start_req_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ channel = chandef->chan;
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+
+ arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width];
+ arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
- return ret;
+ goto vdev_stop;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i started\n",
vdev_id);
+
return 0;
+
+vdev_stop:
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ return -EIO;
}
-static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar)
{
- /* mac80211 requires this op to be present and that's why
- * there's an empty function, this can be extended when
- * required.
- */
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct vdev_create_params param = {};
+ int bit, ret;
+ u8 tmp_addr[6] = {0};
+ u16 nss;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ if (ar->ab->free_vdev_map == 0) {
+ ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->ab->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ param.if_id = ar->monitor_vdev_id;
+ param.type = WMI_VDEV_TYPE_MONITOR;
+ param.subtype = WMI_VDEV_SUBTYPE_NONE;
+ param.pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+
+ ret = ath11k_wmi_vdev_create(ar, tmp_addr, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ ar->monitor_vdev_id = -1;
+ return ret;
+ }
+
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
REPLACED_BY_NOTHING
+ ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n",
+ ar->monitor_vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
+ ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->num_created_vdevs++;
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ ar->monitor_vdev_id = -1;
+ return ret;
+}
+
+static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+
+ ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+ ar->num_created_vdevs--;
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ }
+
+ return ret;
+}
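
Monitor vdev ids come from the shared free_vdev_map bitmap: __ffs64() picks the lowest free bit, which is cleared on create and set back on delete, as in the two helpers above. A small sketch of that allocator shape (the parameter names are illustrative):

#include <linux/bitops.h>
#include <linux/errno.h>

/* Sketch: allocate/free an id out of a 64-bit free map. */
static int example_vdev_id_alloc(u64 *free_map)
{
	int bit;

	if (*free_map == 0)
		return -ENOSPC;	/* all 64 ids in use */

	bit = __ffs64(*free_map);
	*free_map &= ~BIT_ULL(bit);
	return bit;
}

static void example_vdev_id_free(u64 *free_map, int bit)
{
	*free_map |= BIT_ULL(bit);
}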
+
+static int ath11k_mac_monitor_start(struct ath11k *ar)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath11k_mac_get_any_chandef_iter,
+ &chandef);
+ if (!chandef)
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
+ ath11k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->num_started_vdevs++;
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor started\n");
return 0;
}
+static int ath11k_mac_monitor_stop(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ ar->num_started_vdevs--;
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
+
+ return 0;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor: %d",
+ ret);
+ goto err_mon_del;
+ }
+ } else {
+ clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete monitor vdev: %d",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+
+err_mon_del:
+ ath11k_mac_monitor_vdev_delete(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
@@ -1035,7 +1405,7 @@ ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
}
static bool
-ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[])
{
int nss;
@@ -1093,6 +1463,14 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
+ /* As the firmware handles these two flags (IEEE80211_HT_CAP_SGI_20
+ * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, reset both
+ * flags if the guard interval is the default GI.
+ */
+ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI)
+ arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40);
+
if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40))
@@ -1207,6 +1585,34 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
return tx_mcs_set;
}
+static u8 ath11k_get_nss_160mhz(struct ath11k *ar,
+ u8 max_nss)
+{
+ u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+ u8 max_sup_nss = 0;
+
+ switch (nss_ratio_info) {
+ case WMI_NSS_RATIO_1BY2_NSS:
+ max_sup_nss = max_nss >> 1;
+ break;
+ case WMI_NSS_RATIO_3BY4_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+ break;
+ case WMI_NSS_RATIO_1_NSS:
+ max_sup_nss = max_nss;
+ break;
+ case WMI_NSS_RATIO_2_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n",
+ nss_ratio_info);
+ break;
+ }
+
+ return max_sup_nss;
+}
+
static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1216,10 +1622,12 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
- const u16 *vht_mcs_mask;
+ u16 *vht_mcs_mask;
u8 ampdu_factor;
u8 max_nss, vht_mcs;
- int i;
+ int i, vht_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
@@ -1262,6 +1670,24 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+
+ if (vht_nss > sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (vht_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting vht range mcs value to peer supported nss %d for peer %pM\n",
+ sta->rx_nss, sta->addr);
+ vht_mcs_mask[sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ }
+
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
@@ -1294,10 +1720,95 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AC_VHT160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags,
+ arg->peer_bw_rxnss_override);
+}
+
+static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+ }
+ return 0;
+}
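
ath11k_mac_get_max_he_mcs_map() expands the 2-bit-per-NSS HE MCS map fields into bitmasks of usable MCS indices (0-7 -> 0xff, 0-9 -> 0x3ff, 0-11 -> 0xfff, "not supported" -> 0). A worked example; the mcs_map value is made up for illustration:

/* 0xfffa encodes MCS 0-11 on NSS 1 and 2 (0b10 each) and
 * "not supported" (0b11) on NSS 3-8.
 */
static void example_he_mcs_decode(void)
{
	u16 mcs_map = 0xfffa;	/* hypothetical peer map */

	WARN_ON(ath11k_mac_get_max_he_mcs_map(mcs_map, 0) != 0xfff);
	WARN_ON(ath11k_mac_get_max_he_mcs_map(mcs_map, 1) != 0xfff);
	WARN_ON(ath11k_mac_get_max_he_mcs_map(mcs_map, 2) != 0x000);
}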
+
+static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+ const u16 he_mcs_limit[NL80211_HE_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+ he_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ case 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10:
+ case 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
- /* TODO: rxnss_override */
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static bool
+ath11k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+ if (he_mcs_mask[nss])
+ return false;
+
+ return true;
}
static void ath11k_peer_assoc_h_he(struct ath11k *ar,
@@ -1305,13 +1816,30 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 ampdu_factor;
- u16 v;
+ enum nl80211_band band;
+ u16 *he_mcs_mask;
+ u8 max_nss, he_mcs;
+ u16 he_tx_mcs = 0, v = 0;
+ int i, he_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
if (!he_cap->has_he)
return;
+ band = def.chan->band;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
+ return;
+
arg->he_flag = true;
memcpy_and_pad(&arg->peer_he_cap_macinfo,
@@ -1388,25 +1916,48 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ if (he_nss > sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (he_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting he range mcs value to peer supported nss %d for peer %pM\n",
+ sta->rx_nss, sta->addr);
+ he_mcs_mask[sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ }
+
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
arg->peer_he_mcs_count++;
+ he_tx_mcs = v;
}
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
fallthrough;
default:
@@ -1414,11 +1965,102 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
break;
}
+
+ /* Calculate peer NSS capability from HE capabilities if STA
+ * supports HE.
+ */
+ for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = he_tx_mcs >> (2 * i) & 3;
+
+ /* When fixed rates are set, the MCS range in he_tx_mcs might be
+ * unsupported while he_mcs_mask is set, so check either of them
+ * to find the nss.
+ */
+ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ he_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+
+ if (arg->peer_phymode == MODE_11AX_HE160 ||
+ arg->peer_phymode == MODE_11AX_HE80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AX_HE160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+ sta->addr, arg->peer_nss,
+ arg->peer_he_mcs_count,
+ arg->peer_bw_rxnss_override);
+}
+
+static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
+ return;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->he_6ghz_capa.capa);
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
+ arg->peer_he_caps_6ghz));
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+ * Here we add the Max A-MPDU Length Exponent Extension from the HE caps
+ * to the Maximum A-MPDU Length Exponent from the HE 6 GHz Band
+ * Capabilities element to get the overall factor.
+ */
+ ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK,
+ he_cap->he_cap_elem.mac_cap_info[3]) +
+ FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
+ arg->peer_he_caps_6ghz);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
}
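
The resulting 6 GHz A-MPDU limit is 2^(base + ampdu_factor) - 1 bytes, where ampdu_factor is the sum of the two exponent fields and the base (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR) is 13 in mac80211 as assumed here. A worked example with made-up field values:

/* Sketch of the computation above; the peer's field values are
 * hypothetical (extension = 2, 6 GHz exponent = 1).
 */
static u32 example_6ghz_max_mpdu(void)
{
	u8 ext = 2;		/* Max A-MPDU Length Exponent Extension */
	u8 exp_6ghz = 1;	/* HE 6 GHz Band Capabilities exponent */
	u8 ampdu_factor = ext + exp_6ghz;

	return (1u << (13 + ampdu_factor)) - 1;	/* 65535 bytes */
}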
static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
@@ -1427,11 +2069,16 @@ static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !sta->he_6ghz_capa.capa)
return;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ }
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
@@ -1621,6 +2268,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
@@ -1629,10 +2277,12 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->he_cap.has_he) {
+ if (sta->he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -1660,7 +2310,8 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check HE first */
- if (sta->he_cap.has_he) {
+ if (sta->he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
phymode = ath11k_mac_get_phymode_he(ar, sta);
} else if (sta->vht_cap.vht_supported &&
!ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
@@ -1702,11 +2353,12 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
- ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath11k_peer_assoc_h_smps(sta, arg);
/* TODO: amsdu_disable req? */
@@ -1714,15 +2366,20 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
const u8 *addr,
- const struct ieee80211_sta_ht_cap *ht_cap)
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ u16 he_6ghz_capa)
{
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa);
+ }
if (smps >= ARRAY_SIZE(ath11k_smps_map))
return -EINVAL;
@@ -1775,7 +2432,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
}
ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->ht_cap);
+ &ap_sta->ht_cap,
+ le16_to_cpu(ap_sta->he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -1956,7 +2614,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
/* Set and enable SRG/non-SRG OBSS PD Threshold */
param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
- if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) {
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
if (ret)
ath11k_warn(ar->ab,
@@ -2383,18 +3041,21 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
break;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
+ if (ar->scan.is_roc && ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ fallthrough;
+ case ATH11K_SCAN_STARTING:
if (!ar->scan.is_roc) {
struct cfg80211_scan_info info = {
- .aborted = (ar->scan.state ==
- ATH11K_SCAN_ABORTING),
+ .aborted = ((ar->scan.state ==
+ ATH11K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH11K_SCAN_STARTING)),
};
ieee80211_scan_completed(ar->hw, &info);
- } else if (ar->scan.roc_notify) {
- ieee80211_remain_on_channel_expired(ar->hw);
}
- fallthrough;
- case ATH11K_SCAN_STARTING:
+
ar->scan.state = ATH11K_SCAN_IDLE;
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
@@ -2887,6 +3548,20 @@ ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
}
static int
+ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+ num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+ return num_rates;
+}
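
This helper simply counts the set bits across all per-NSS HE masks; the callers below compare the result against 1 to detect that userspace pinned exactly one HE rate. A hedged illustration of the same counting logic (names are generic):

#include <linux/bitops.h>

/* Sketch: count the rates left enabled in a per-NSS mask array.
 * A mask of { [0] = 0x0010 } counts one rate (MCS 4 on NSS 1),
 * which selects the fixed-rate path.
 */
static int example_num_he_rates(const u16 *he_mcs, int nss_max)
{
	int i, num = 0;

	for (i = 0; i < nss_max; i++)
		num += hweight16(he_mcs[i]);

	return num;	/* == 1 means a single fixed HE rate */
}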
+
+static int
ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
struct ieee80211_sta *sta,
const struct cfg80211_bitrate_mask *mask,
@@ -2914,6 +3589,10 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
return -EINVAL;
}
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->rx_nss)
+ return -EINVAL;
+
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
sta->addr);
@@ -2932,6 +3611,57 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
return ret;
}
+static int
+ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 he_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+ nss = i + 1;
+ he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac setting fixed he rate for peer %pM, device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HE);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update sta %pM fixed rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
static int ath11k_station_assoc(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2943,7 +3673,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
- u8 num_vht_rates;
+ u8 num_vht_rates, num_he_rates;
lockdep_assert_held(&ar->conf_mutex);
@@ -2969,9 +3699,10 @@ static int ath11k_station_assoc(struct ath11k *ar,
}
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask);
- /* If single VHT rate is configured (by set_bitrate_mask()),
- * peer_assoc will disable VHT. This is now enabled by a peer specific
+ /* If single VHT/HE rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT/HE. This is now enabled by a peer specific
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
@@ -2980,6 +3711,11 @@ static int ath11k_station_assoc(struct ath11k *ar,
band);
if (ret)
return ret;
+ } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
}
/* Re-assoc is run only to update supported rates for given station. It
@@ -2989,7 +3725,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
return 0;
ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->ht_cap);
+ &sta->ht_cap, le16_to_cpu(sta->he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3050,8 +3786,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
u32 changed, bw, nss, smps;
- int err, num_vht_rates;
+ int err, num_vht_rates, num_he_rates;
const struct cfg80211_bitrate_mask *mask;
struct peer_assoc_params peer_arg;
@@ -3066,6 +3803,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
spin_lock_bh(&ar->data_lock);
@@ -3081,8 +3819,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mutex_lock(&ar->conf_mutex);
nss = max_t(u32, 1, nss);
- nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)));
+ nss = min(nss, max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
@@ -3118,6 +3857,8 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mask = &arvif->bitrate_mask;
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
/* Peer_assoc_prepare will reject vht rates in
* bitrate_mask if it's not available in range format and
* will set other rates using peer_assoc command.
*/
if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
+ } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
} else {
- /* If the peer is non-VHT or no fixed VHT rate
+ /* If the peer is non-VHT/HE or no fixed VHT/HE rate
* is provided in the new bitrate mask we set the
- * other rates using peer_assoc command.
+ * other rates using peer_assoc command. Also clear
+ * the peer fixed rate settings as they have higher priority
+ * than peer assoc
*/
+ err = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for sta %pM: %d\n",
+ sta->addr, err);
+
ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
&peer_arg, true);
@@ -3155,6 +3910,31 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mutex_unlock(&ar->conf_mutex);
}
+static void ath11k_sta_set_4addr_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ arsta = container_of(wk, struct ath11k_sta, set_4addr_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for peer %pM\n", sta->addr);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n",
+ sta->addr, ret);
+}
+
static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
struct ieee80211_sta *sta)
{
@@ -3234,11 +4014,13 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
- ath11k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
sta->addr, ret);
goto free_tx_stats;
}
@@ -3291,8 +4073,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST))
+ new_state == IEEE80211_STA_NOTEXIST)) {
cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
mutex_lock(&ar->conf_mutex);
@@ -3301,6 +4085,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
ret = ath11k_mac_station_add(ar, vif, sta);
if (ret)
@@ -3395,6 +4180,19 @@ out:
return ret;
}
+static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ if (enabled && !arsta->use_4addr_set) {
+ ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk);
+ arsta->use_4addr_set = true;
+ }
+}
+
static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3765,11 +4563,6 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
- /* TODO: Enable back VHT160 mode once association issues are fixed */
- /* Disabling VHT160 and VHT80+80 modes */
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
-
rxmcs_map = 0;
txmcs_map = 0;
for (i = 0; i < 8; i++) {
@@ -3814,7 +4607,9 @@ static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
rate_cap_rx_chainmask);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) {
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ (ar->ab->hw_params.single_pdev_only ||
+ !ar->supports_6ghz)) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
if (ht_cap_info)
@@ -4313,6 +5108,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ath11k_sta *arsta = NULL;
u32 info_flags = info->flags;
bool is_prb_rsp;
int ret;
@@ -4338,7 +5134,10 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
- ret = ath11k_dp_tx(ar, arvif, skb);
+ if (control->sta)
+ arsta = (struct ath11k_sta *)control->sta->drv_priv;
+
+ ret = ath11k_dp_tx(ar, arvif, arsta, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
@@ -4639,7 +5438,8 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
(vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP))
- vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+ IEEE80211_OFFLOAD_DECAP_ENABLED);
if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
param_value = ATH11K_HW_TXRX_ETHERNET;
@@ -4655,6 +5455,22 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
}
+
+ param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+ if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
+ }
}
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
@@ -4683,8 +5499,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
- ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
- TARGET_NUM_VDEVS);
+ ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
+ ar->num_created_vdevs, TARGET_NUM_VDEVS);
ret = -EBUSY;
goto err;
}
@@ -4700,10 +5516,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI;
memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].ht_mcs));
memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].he_mcs));
}
bit = __ffs64(ab->free_vdev_map);
@@ -4724,6 +5543,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = bit;
break;
default:
WARN_ON(1);
@@ -4825,6 +5645,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
break;
+ case WMI_VDEV_TYPE_MONITOR:
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ break;
default:
break;
}
@@ -4845,6 +5668,16 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
+ if (vif->type != NL80211_IFTYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
+ ret);
+ goto err_peer_del;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -4942,6 +5775,18 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->monitor_vdev_id = -1;
+ } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) &&
+ !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d",
+ ret);
+ }
+
err_vdev_del:
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
@@ -4961,7 +5806,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recal traffic pause state based on the available vdevs */
@@ -4984,8 +5828,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath11k *ar = hw->priv;
- bool reset_flag = false;
- int ret = 0;
mutex_lock(&ar->conf_mutex);
@@ -4993,23 +5835,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
- /* For monitor mode */
- reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
-
- ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (!ret) {
- if (!reset_flag)
- set_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- else
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- } else {
- ath11k_warn(ar->ab,
- "fail to set monitor filter: %d\n", ret);
- }
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
- changed_flags, *total_flags, reset_flag);
-
mutex_unlock(&ar->conf_mutex);
}
@@ -5118,20 +5943,6 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
-static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
-{
- lockdep_assert_held(&ar->conf_mutex);
-
- if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
- return -ESHUTDOWN;
-
- if (!wait_for_completion_timeout(&ar->vdev_setup_done,
- ATH11K_VDEV_SETUP_TIMEOUT_HZ))
- return -ETIMEDOUT;
-
- return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
-}
-
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
const struct cfg80211_chan_def *chandef,
@@ -5214,7 +6025,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
return ret;
}
- ar->num_started_vdevs++;
+ if (!restart)
+ ar->num_started_vdevs++;
+
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
@@ -5342,12 +6155,16 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
struct ath11k_vif *arvif;
int ret;
int i;
+ bool monitor_vif = false;
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
arvif = (void *)vifs[i].vif->drv_priv;
+ if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ monitor_vif = true;
+
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
@@ -5368,6 +6185,8 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
arvif->vdev_id, ret);
continue;
}
+
+ ar->num_started_vdevs--;
}
/* All relevant vdevs are downed and associated channel resources
@@ -5405,6 +6224,24 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
continue;
}
}
+
+ /* Restart the internal monitor vdev on new channel */
+ if (!monitor_vif &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+ }
}
static void
@@ -5484,7 +6321,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
- ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr);
if (ret) {
ath11k_warn(ab, "failed put monitor up: %d\n", ret);
return ret;
@@ -5544,6 +6381,18 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
}
}
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+
+ arvif->is_started = true;
+ goto out;
+ }
+
ret = ath11k_mac_vdev_start(arvif, &ctx->def);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
@@ -5551,14 +6400,19 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ctx->def.chan->center_freq, ret);
goto out;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
- ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
- if (ret)
- goto out;
- }
arvif->is_started = true;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+ }
+
/* TODO: Setup ps and cts/rts protection */
ret = 0;
@@ -5592,6 +6446,20 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
ath11k_peer_find_by_addr(ab, ar->mac_addr))
ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ arvif->is_started = false;
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
ath11k_warn(ab, "failed to stop vdev %i: %d\n",
@@ -5603,6 +6471,16 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ar->num_started_vdevs == 1 &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5720,9 +6598,26 @@ ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
return false;
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+ return false;
+
return num_rates == 1;
}
+static __le16
+ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_80p80;
+
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+ return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
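ath11k_mac_get_tx_mcs_map() above picks the TX MCS map for the widest bandwidth the capability advertises (80+80 preferred over 160, which is preferred over plain 80). Each HE MCS map packs two bits per spatial stream; a decoding sketch, given a const struct ieee80211_sta_he_cap *he_cap, with the 2-bit encoding taken from 802.11ax rather than this patch:

    u16 mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(he_cap));
    int nss;

    for (nss = 0; nss < 8; nss++) {
    	u8 mcs = (mcs_map >> (2 * nss)) & 0x3;
    	/* 0: MCS 0-7, 1: MCS 0-9, 2: MCS 0-11, 3: NSS not supported */
    }
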
static bool
ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
enum nl80211_band band,
@@ -5731,8 +6626,10 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
{
struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u16 he_mcs_map = 0;
u8 ht_nss_mask = 0;
u8 vht_nss_mask = 0;
+ u8 he_nss_mask = 0;
int i;
/* No need to consider legacy here. Basic rates are always present
@@ -5759,7 +6656,20 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
return false;
}
- if (ht_nss_mask != vht_nss_mask)
+ he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap));
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (mask->control[band].he_mcs[i] == 0)
+ continue;
+
+ if (mask->control[band].he_mcs[i] ==
+ ath11k_mac_get_max_he_mcs_map(he_mcs_map, i))
+ he_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
return false;
if (ht_nss_mask == 0)
@@ -5806,42 +6716,125 @@ ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
return 0;
}
-static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
- u32 rate, u8 nss, u8 sgi, u8 ldpc)
+static int
+ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf)
{
struct ath11k *ar = arvif->ar;
- u32 vdev_param;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ /* WMI encodes HE GI as: 0.8 us = 0, 1.6 us = 2 and 3.2 us = 3, hence the +1 below. */
+ if (he_gi && he_gi != 0xFF)
+ he_gi += 1;
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
- arvif->vdev_id, rate, nss, sgi);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_SGI, he_gi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he gi %d: %d\n",
+ he_gi, ret);
+ return ret;
+ }
+ /* WMI HE LTF values start from 1: nl80211's 0-based 1x/2x/4x become 1/2/3 */
+ if (he_ltf != 0xFF)
+ he_ltf += 1;
- vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, rate);
+ WMI_VDEV_PARAM_HE_LTF, he_ltf);
if (ret) {
- ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
- rate, ret);
+ ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n",
+ he_ltf, ret);
return ret;
}
- vdev_param = WMI_VDEV_PARAM_NSS;
+ return 0;
+}
+
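The fixed-rate translation applied above, spelled out (inferred directly from the two increments; 0xFF passes through untouched as "unset"):

    nl80211 HE GI 0 (0.8 us) -> WMI 0        nl80211 1xLTF (0) -> WMI 1
    nl80211 HE GI 1 (1.6 us) -> WMI 2        nl80211 2xLTF (1) -> WMI 2
    nl80211 HE GI 2 (3.2 us) -> WMI 3        nl80211 4xLTF (2) -> WMI 3
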
+static int
+ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+ u32 he_ar_gi_ltf;
+
+ if (he_gi != 0xFF) {
+ switch (he_gi) {
+ case NL80211_RATE_INFO_HE_GI_0_8:
+ he_gi = WMI_AUTORATE_800NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_1_6:
+ he_gi = WMI_AUTORATE_1600NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_3_2:
+ he_gi = WMI_AUTORATE_3200NS_GI;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi);
+ return -EINVAL;
+ }
+ }
+
+ if (he_ltf != 0xFF) {
+ switch (he_ltf) {
+ case NL80211_RATE_INFO_HE_1XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_1X;
+ break;
+ case NL80211_RATE_INFO_HE_2XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_2X;
+ break;
+ case NL80211_RATE_INFO_HE_4XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_4X;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf);
+ return -EINVAL;
+ }
+ }
+
+ he_ar_gi_ltf = he_gi | he_ltf;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, nss);
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+ he_ar_gi_ltf);
if (ret) {
- ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
- nss, ret);
+ ath11k_warn(ar->ab,
+ "failed to set he autorate gi %u ltf %u: %d\n",
+ he_gi, he_ltf, ret);
return ret;
}
- vdev_param = WMI_VDEV_PARAM_SGI;
+ return 0;
+}
+
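The auto-rate path ORs the two selections into a single WMI_VDEV_PARAM_AUTORATE_MISC_CFG word, using the flag enum added to wmi.h later in this patch (LTF flags in bits 0-2, GI flags in bits 8-11). One worked value:

    /* he_gi = NL80211_RATE_INFO_HE_GI_0_8, he_ltf = NL80211_RATE_INFO_HE_2XLTF */
    u32 he_ar_gi_ltf = WMI_AUTORATE_800NS_GI |	/* BIT(9) = 0x200 */
    		   WMI_HE_AUTORATE_LTF_2X;	/* BIT(1) = 0x002 */
    /* he_ar_gi_ltf == 0x202 */
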
+static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc,
+ u8 he_gi, u8 he_ltf, bool he_fixed_rate)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n",
+ arvif->vdev_id, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
+
+ if (!arvif->vif->bss_conf.he_support) {
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, sgi);
+ vdev_param, nss);
if (ret) {
- ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
- sgi, ret);
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
return ret;
}
@@ -5854,6 +6847,35 @@ static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
return ret;
}
+ if (arvif->vif->bss_conf.he_support) {
+ if (he_fixed_rate) {
+ ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -5882,6 +6904,31 @@ ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
return true;
}
+static bool
+ath11k_mac_he_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 he_mcs;
+
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = mask->control[band].he_mcs[i];
+
+ switch (he_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(10) - 1:
+ case BIT(12) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
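The switch above accepts only an empty per-stream mask or one of the three contiguous HE MCS ranges; the BIT(n) - 1 cases expand to:

    BIT(8)  - 1 = 0x00ff  ->  MCS 0-7
    BIT(10) - 1 = 0x03ff  ->  MCS 0-9
    BIT(12) - 1 = 0x0fff  ->  MCS 0-11
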
static void ath11k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
@@ -5913,6 +6960,54 @@ static void ath11k_mac_disable_peer_fixed_rate(void *data,
sta->addr, ret);
}
+static bool
+ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ bool he_fixed_rate = false, vht_fixed_rate = false;
+ struct ath11k_peer *peer, *tmp;
+ const u16 *vht_mcs_mask, *he_mcs_mask;
+ u8 vht_nss, he_nss;
+ bool ret = true;
+
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+ vht_fixed_rate = true;
+
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+ he_fixed_rate = true;
+
+ if (!vht_fixed_rate && !he_fixed_rate)
+ return true;
+
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
+ if (peer->sta) {
+ if (vht_fixed_rate && (!peer->sta->vht_cap.vht_supported ||
+ peer->sta->rx_nss < vht_nss)) {
+ ret = false;
+ goto out;
+ }
+ if (he_fixed_rate && (!peer->sta->he_cap.has_he ||
+ peer->sta->rx_nss < he_nss)) {
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ spin_unlock_bh(&ar->ab->base_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
static int
ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5924,6 +7019,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u8 he_ltf = 0;
+ u8 he_gi = 0;
u32 rate;
u8 nss;
u8 sgi;
@@ -5931,6 +7029,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
int single_nss;
int ret;
int num_rates;
+ bool he_fixed_rate = false;
if (ath11k_mac_vif_chan(vif, &def))
return -EPERM;
@@ -5938,12 +7037,16 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
band = def.chan->band;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
+ he_gi = mask->control[band].he_gi;
+ he_ltf = mask->control[band].he_ltf;
+
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
* requires passing at least one of the used basic rates along with them.
* Fixed rate setting across different preambles (legacy, HT, VHT) is
@@ -5967,11 +7070,22 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
nss = single_nss;
+ mutex_lock(&ar->conf_mutex);
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+ mutex_unlock(&ar->conf_mutex);
} else {
rate = WMI_FIXED_RATE_NONE;
+
+ if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask))
+ ath11k_warn(ar->ab,
+ "could not update fixed rate settings to all peers due to mcs/nss incompaitiblity\n");
nss = min_t(u32, ar->num_tx_chains,
- max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)));
+ max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
/* If multiple rates across different preambles are given
* we can reconfigure this info with all peers using PEER_ASSOC
@@ -6002,16 +7116,28 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
* RATEMASK CMD
*/
ath11k_warn(ar->ab,
- "Setting more than one MCS Value in bitrate mask not supported\n");
+ "setting %d mcs values in bitrate mask not supported\n",
+ num_rates);
return -EINVAL;
}
+ num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
+ if (num_rates == 1)
+ he_fixed_rate = true;
+
+ if (!ath11k_mac_he_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ ath11k_warn(ar->ab,
+ "Setting more than one HE MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ar->conf_mutex);
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_disable_peer_fixed_rate,
arvif);
- mutex_lock(&ar->conf_mutex);
-
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_set_bitrate_mask_iter,
@@ -6022,9 +7148,10 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
if (ret) {
- ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
arvif->vdev_id, ret);
}
@@ -6109,7 +7236,13 @@ static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
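With this hunk the survey index runs across the 2, 5 and 6 GHz bands in sequence. As a worked example with illustrative channel counts (14 on 2 GHz, 28 on 5 GHz), idx = 45 becomes 45 - 14 - 28 = 3, i.e. the fourth 6 GHz channel.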
@@ -6180,6 +7313,7 @@ static const struct ieee80211_ops ath11k_ops = {
.cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
.set_key = ath11k_mac_op_set_key,
.sta_state = ath11k_mac_op_sta_state,
+ .sta_set_4addr = ath11k_mac_op_sta_set_4addr,
.sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
.sta_rc_update = ath11k_mac_op_sta_rc_update,
.conf_tx = ath11k_mac_op_conf_tx,
@@ -6240,7 +7374,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
u32 supported_bands)
{
struct ieee80211_supported_band *band;
- struct ath11k_hal_reg_capabilities_ext *reg_cap;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap;
void *channels;
u32 phy_id;
@@ -6250,6 +7384,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
ATH11K_NUM_CHANS);
reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+ temp_reg_cap = reg_cap;
if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
channels = kmemdup(ath11k_2ghz_channels,
@@ -6268,11 +7403,11 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_2ghz_chan,
- reg_cap->high_2ghz_chan);
+ temp_reg_cap->low_2ghz_chan,
+ temp_reg_cap->high_2ghz_chan);
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
@@ -6292,9 +7427,15 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_5ghz_chan,
- reg_cap->high_5ghz_chan);
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
}
if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
@@ -6317,12 +7458,12 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_5ghz_chan,
- reg_cap->high_5ghz_chan);
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
}
}
@@ -6367,7 +7508,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -6505,8 +7648,16 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
- ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
- if (ht_cap & WMI_HT_CAP_ENABLED) {
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ }
+
+ if (cap->nss_ratio_enabled)
+ ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
@@ -6521,7 +7672,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -6590,7 +7741,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
/* Apply the regd received during initialization */
- ret = ath11k_regd_update(ar, true);
+ ret = ath11k_regd_update(ar);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
goto err_unregister_hw;
@@ -6631,6 +7782,10 @@ int ath11k_mac_register(struct ath11k_base *ab)
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
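As a worked value for the bitmap above, under the illustrative assumption TARGET_NUM_VDEVS = 16: two radios give free_vdev_map = (1LL << 32) - 1 = 0xffffffff, i.e. 32 initially-free vdev slots, one bit per vdev.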
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
@@ -6641,18 +7796,14 @@ int ath11k_mac_register(struct ath11k_base *ab)
ar->mac_addr[4] += i;
}
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+
ret = __ath11k_mac_register(ar);
if (ret)
goto err_cleanup;
-
- idr_init(&ar->txmgmt_idr);
- spin_lock_init(&ar->txmgmt_idr_lock);
}
- /* Initialize channel counters frequency value in hertz */
- ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
-
return 0;
err_cleanup:
@@ -6723,7 +7874,11 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 4bc59bdaf244..254ca4acc8e8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -115,6 +115,9 @@ struct ath11k_generic_iter {
#define WMI_MAX_SPATIAL_STREAM 3
#define ATH11K_CHAN_WIDTH_NUM 8
+#define ATH11K_BW_NSS_MAP_ENABLE BIT(31)
+#define ATH11K_PEER_RX_NSS_160MHZ GENMASK(2, 0)
+#define ATH11K_PEER_RX_NSS_80_80MHZ GENMASK(5, 3)
#define ATH11K_OBSS_PD_MAX_THRESHOLD -82
#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 5abb38cc3b55..7b3bce0ba76e 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -430,6 +430,8 @@ static void ath11k_pci_force_wake(struct ath11k_base *ab)
static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
{
+ mdelay(100);
+
if (power_on) {
ath11k_pci_enable_ltssm(ab);
ath11k_pci_clear_all_intrs(ab);
@@ -439,9 +441,9 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
}
ath11k_mhi_clear_vector(ab);
+ ath11k_pci_clear_dbg_registers(ab);
ath11k_pci_soc_global_reset(ab);
ath11k_mhi_set_mhictrl_reset(ab);
- ath11k_pci_clear_dbg_registers(ab);
}
int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index f49abefa9618..85471f8b3563 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -251,6 +251,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param)
{
struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -319,6 +320,16 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+ arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
+ FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
+ peer->peer_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+ }
+
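FIELD_PREP() shifts a value into the bit positions named by a mask. A self-contained illustration with made-up masks (the real HTT_TCL_META_DATA_* layout is defined elsewhere in the driver):

    #include <linux/bitfield.h>

    #define EXAMPLE_TYPE	GENMASK(1, 0)
    #define EXAMPLE_PEER_ID	GENMASK(14, 2)

    u32 meta = FIELD_PREP(EXAMPLE_TYPE, 0) |
    	   FIELD_PREP(EXAMPLE_PEER_ID, 37);	/* 37 << 2 = 0x94 */
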
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index b5e34d670715..8c615bc788ca 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -951,6 +951,78 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
num_macs),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
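Each optional QMI field is described by a pair of table entries sharing one tlv_type: a QMI_OPT_FLAG u8 named <field>_valid, then the value element itself. Consumers gate on the flag, as the capability handler added later in this patch does:

    if (resp.eeprom_read_timeout_valid)
    	ab->qmi.target.eeprom_caldata = resp.eeprom_read_timeout;
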
@@ -1770,7 +1842,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->vaddr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!chunk->vaddr) {
if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
@@ -1846,8 +1918,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- ret = qmi_txn_init(&ab->qmi.handle, &txn,
- qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei,
+ &resp);
if (ret < 0)
goto out;
@@ -1900,6 +1972,12 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
+ if (resp.eeprom_read_timeout_valid) {
+ ab->qmi.target.eeprom_caldata =
+ resp.eeprom_read_timeout;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n");
+ }
+
ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
@@ -1917,98 +1995,73 @@ out:
return ret;
}
-static int
-ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
- struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
- void __iomem *bdf_addr)
-{
- const struct firmware *fw_entry;
- struct ath11k_board_data bd;
- u32 fw_size;
- int ret;
-
- switch (type) {
- case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
- memset(&bd, 0, sizeof(bd));
-
- ret = ath11k_core_fetch_bdf(ab, &bd);
- if (ret) {
- ath11k_warn(ab, "failed to load board file: %d\n", ret);
- return ret;
- }
-
- fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
- memcpy_toio(bdf_addr, bd.data, fw_size);
- ath11k_core_free_bdf(ab, &bd);
- break;
- case ATH11K_QMI_FILE_TYPE_CALDATA:
- fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
- if (IS_ERR(fw_entry)) {
- ret = PTR_ERR(fw_entry);
- ath11k_warn(ab, "failed to load %s: %d\n",
- ATH11K_DEFAULT_CAL_FILE, ret);
- return ret;
- }
-
- fw_size = min_t(u32, ab->hw_params.fw.board_size,
- fw_entry->size);
-
- memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
- fw_entry->data, fw_size);
-
- release_firmware(fw_entry);
- break;
- default:
- return -EINVAL;
- }
-
- req->total_size = fw_size;
- return 0;
-}
-
-static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
+static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
struct qmi_txn txn = {};
+ const u8 *temp = data;
void __iomem *bdf_addr = NULL;
- int type, ret;
+ int ret;
+ u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
+
memset(&resp, 0, sizeof(resp));
- bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE);
- if (!bdf_addr) {
- ath11k_warn(ab, "failed ioremap for board file\n");
- ret = -EIO;
- goto out;
+ if (ab->bus_params.fixed_bdf_addr) {
+ bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
+ ret = -EIO;
+ goto err_free_req;
+ }
}
- for (type = 0; type < ATH11K_QMI_MAX_FILE_TYPE; type++) {
+ while (remaining) {
req->valid = 1;
req->file_id_valid = 1;
req->file_id = ab->qmi.target.board_id;
req->total_size_valid = 1;
+ req->total_size = remaining;
req->seg_id_valid = 1;
- req->seg_id = type;
- req->data_valid = 0;
- req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
- req->bdf_type = 0;
- req->bdf_type_valid = 0;
+ req->data_valid = 1;
+ req->bdf_type = type;
+ req->bdf_type_valid = 1;
req->end_valid = 1;
- req->end = 1;
+ req->end = 0;
- ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr);
- if (ret < 0)
- goto out_qmi_bdf;
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ if (ab->bus_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ req->data_valid = 0;
+ req->end = 1;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ memcpy(req->data, temp, req->data_len);
+ }
+
+ if (ab->bus_params.fixed_bdf_addr) {
+ if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
+ bdf_addr += ab->hw_params.fw.cal_offset;
+
+ memcpy_toio(bdf_addr, temp, len);
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_bdf_download_resp_msg_v01_ei,
&resp);
if (ret < 0)
- goto out_qmi_bdf;
+ goto err_iounmap;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
type);
@@ -2019,54 +2072,62 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
- goto out_qmi_bdf;
+ goto err_iounmap;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
- if (ret < 0)
- goto out_qmi_bdf;
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait board file download request: %d\n",
+ ret);
+ goto err_iounmap;
+ }
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "board file download request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
- goto out_qmi_bdf;
+ goto err_iounmap;
+ }
+
+ if (ab->bus_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ remaining = 0;
+ } else {
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n",
+ remaining);
}
}
-out_qmi_bdf:
- iounmap(bdf_addr);
-out:
+err_iounmap:
+ if (ab->bus_params.fixed_bdf_addr)
+ iounmap(bdf_addr);
+
+err_free_req:
kfree(req);
+
return ret;
}
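
A worked pass through the loop above, assuming QMI_WLANFW_MAX_DATA_SIZE_V01 is 6144 bytes (its usual value in this driver, stated here as an assumption): a 20000-byte board file goes out as four requests of 6144, 6144, 6144 and 1568 bytes with seg_id 0..3, and end = 1 set only on the last. On the fixed-address and EEPROM paths the loop collapses to a single request with data_valid = 0.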
static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
{
- struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
- struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
struct ath11k_board_data bd;
- unsigned int remaining;
- struct qmi_txn txn = {};
- int ret;
- const u8 *temp;
- int bdf_type;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
+ u32 fw_size, file_type;
+ int ret = 0, bdf_type;
+ const u8 *tmp;
memset(&bd, 0, sizeof(bd));
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret) {
- ath11k_warn(ab, "failed to fetch board file: %d\n", ret);
+ ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
goto out;
}
- temp = bd.data;
- remaining = bd.len;
-
if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
else
@@ -2074,67 +2135,61 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type);
- while (remaining) {
- req->valid = 1;
- req->file_id_valid = 1;
- req->file_id = ab->qmi.target.board_id;
- req->total_size_valid = 1;
- req->total_size = bd.len;
- req->seg_id_valid = 1;
- req->data_valid = 1;
- req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
- req->bdf_type = bdf_type;
- req->bdf_type_valid = 1;
- req->end_valid = 1;
- req->end = 0;
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
- if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
- req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
- } else {
- req->data_len = remaining;
- req->end = 1;
- }
+ ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load bdf file\n");
+ goto out;
+ }
- memcpy(req->data, temp, req->data_len);
+ /* QCA6390 does not support cal data, skip it */
+ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF)
+ goto out;
- ret = qmi_txn_init(&ab->qmi.handle, &txn,
- qmi_wlanfw_bdf_download_resp_msg_v01_ei,
- &resp);
- if (ret < 0)
- goto out_qmi_bdf;
+ if (ab->qmi.target.eeprom_caldata) {
+ file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
+ tmp = filename;
+ fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ file_type = ATH11K_QMI_FILE_TYPE_CALDATA;
- ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n",
- remaining);
+ /* cal-<bus>-<id>.bin */
+ snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath11k_bus_str(ab->hif.bus), dev_name(dev));
+ fw_entry = ath11k_core_firmware_request(ab, filename);
+ if (!IS_ERR(fw_entry))
+ goto success;
- ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
- QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
- QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
- qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
- if (ret < 0) {
- qmi_txn_cancel(&txn);
- goto out_qmi_bdf;
+ fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ ret = PTR_ERR(fw_entry);
+ ath11k_warn(ab,
+ "qmi failed to load CAL data file:%s\n",
+ filename);
+ goto out;
}
+success:
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
+ tmp = fw_entry->data;
+ }
- ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
- if (ret < 0)
- goto out_qmi_bdf;
-
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "bdf download request failed: %d %d\n",
- resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
- goto out_qmi_bdf;
- }
- remaining -= req->data_len;
- temp += req->data_len;
- req->seg_id++;
+ ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load caldata\n");
+ goto out_qmi_cal;
}
-out_qmi_bdf:
- ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi caldata type: %u\n", file_type);
+out_qmi_cal:
+ if (!ab->qmi.target.eeprom_caldata)
+ release_firmware(fw_entry);
out:
- kfree(req);
+ ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi BDF download sequence completed\n");
+
return ret;
}
@@ -2519,10 +2574,7 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
- if (ab->bus_params.fixed_bdf_addr)
- ret = ath11k_qmi_load_bdf_fixed_addr(ab);
- else
- ret = ath11k_qmi_load_bdf_qmi(ab);
+ ret = ath11k_qmi_load_bdf_qmi(ab);
if (ret < 0) {
ath11k_warn(ab, "failed to load board data file: %d\n", ret);
return ret;
@@ -2707,8 +2759,10 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
list_del(&event->list);
spin_unlock(&qmi->event_lock);
- if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) {
+ kfree(event);
return;
+ }
switch (event->type) {
case ATH11K_QMI_EVENT_SERVER_ARRIVE:
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 3d5930330703..3bb0f9ef7996 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -10,11 +10,9 @@
#include <linux/soc/qcom/qmi.h>
#define ATH11K_HOST_VERSION_STRING "WIN"
-#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 10000
#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000
-#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
-#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
@@ -44,6 +42,7 @@ struct ath11k_base;
enum ath11k_qmi_file_type {
ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
ATH11K_QMI_FILE_TYPE_CALDATA,
+ ATH11K_QMI_FILE_TYPE_EEPROM,
ATH11K_QMI_MAX_FILE_TYPE,
};
@@ -104,6 +103,7 @@ struct target_info {
u32 board_id;
u32 soc_id;
u32 fw_version;
+ u32 eeprom_caldata;
char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH];
@@ -135,7 +135,7 @@ struct ath11k_qmi {
wait_queue_head_t cold_boot_waitq;
};
-#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261
#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
@@ -285,7 +285,7 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
};
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
-#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
#define QMI_WLANFW_CAP_REQ_V01 0x0024
#define QMI_WLANFW_CAP_RESP_V01 0x0024
@@ -366,6 +366,14 @@ struct qmi_wlanfw_cap_resp_msg_v01 {
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
u8 num_macs_valid;
u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_read_timeout_valid;
+ u32 eeprom_read_timeout;
};
struct qmi_wlanfw_cap_req_msg_v01 {
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index e1a1df169034..a66b5bdd2167 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -97,7 +97,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int params_len;
int i, ret;
bands = hw->wiphy->bands;
@@ -117,10 +116,8 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
if (WARN_ON(!num_channels))
return -EINVAL;
- params_len = sizeof(struct scan_chan_list_params) +
- num_channels * sizeof(struct channel_param);
- params = kzalloc(params_len, GFP_KERNEL);
-
+ params = kzalloc(struct_size(params, ch_param, num_channels),
+ GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -198,7 +195,7 @@ static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
sizeof(struct ieee80211_reg_rule));
}
-int ath11k_regd_update(struct ath11k *ar, bool init)
+int ath11k_regd_update(struct ath11k *ar)
{
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
@@ -209,7 +206,10 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
spin_lock_bh(&ab->base_lock);
- if (init) {
+ /* Prefer the latest regd update over default if it's available */
+ if (ab->new_regd[pdev_id]) {
+ regd = ab->new_regd[pdev_id];
+ } else {
/* Apply the regd received during init through
* WMI_REG_CHAN_LIST_CC event. In case of failure to
* receive the regd, initialize with a default world
@@ -222,8 +222,6 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
"failed to receive default regd during init\n");
regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
}
- } else {
- regd = ab->new_regd[pdev_id];
}
if (!regd) {
@@ -683,7 +681,7 @@ void ath11k_regd_update_work(struct work_struct *work)
regd_update_work);
int ret;
- ret = ath11k_regd_update(ar, false);
+ ret = ath11k_regd_update(ar);
if (ret) {
/* Firmware has already moved to the new regd. We need
* to maintain channel consistency across FW, Host driver
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 65d56d44796f..5fb9dc03a74e 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -31,6 +31,6 @@ void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
-int ath11k_regd_update(struct ath11k *ar, bool init);
+int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 1afe67759659..ac4da99b5577 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -11,22 +11,20 @@
#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1
#define ATH11K_SPECTRAL_DWORD_SIZE 4
-/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes */
-#define ATH11K_SPECTRAL_BIN_SIZE 4
-#define ATH11K_SPECTRAL_ATH11K_MIN_BINS 64
-#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32
-#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256
+#define ATH11K_SPECTRAL_MIN_BINS 32
+#define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1)
+#define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1)
#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
/* Max channel computed by sum of 2g and 5g band channels */
#define ATH11K_SPECTRAL_TOTAL_CHANNEL 41
#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70
-#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE (sizeof(struct fft_sample_ath11k) + \
- ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS)
+#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \
+ ATH11K_SPECTRAL_MAX_IB_BINS(x))
#define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \
ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL)
-#define ATH11K_SPECTRAL_SUB_BUFF_SIZE ATH11K_SPECTRAL_PER_SAMPLE_SIZE
+#define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x)
#define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE
#define ATH11K_SPECTRAL_20MHZ 20
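
Worked values for the newly parameterized macros, assuming a chip reporting max_fft_bins = 512: ATH11K_SPECTRAL_MAX_IB_BINS gives 256 in-band bins and a relay sub-buffer of sizeof(struct fft_sample_ath11k) + 256 bytes, while the floor stays at ATH11K_SPECTRAL_MIN_BINS = 32, i.e. 16 in-band bins.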
@@ -444,8 +442,8 @@ static ssize_t ath11k_write_file_spectral_bins(struct file *file,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- if (val < ATH11K_SPECTRAL_ATH11K_MIN_BINS ||
- val > SPECTRAL_ATH11K_MAX_NUM_BINS)
+ if (val < ATH11K_SPECTRAL_MIN_BINS ||
+ val > ar->ab->hw_params.spectral.max_fft_bins)
return -EINVAL;
if (!is_power_of_2(val))
@@ -581,12 +579,12 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
struct spectral_tlv *tlv;
int tlv_len, bin_len, num_bins;
u16 length, freq;
- u8 chan_width_mhz;
+ u8 chan_width_mhz, bin_sz;
int ret;
lockdep_assert_held(&ar->spectral.lock);
- if (!ab->hw_params.spectral_fft_sz) {
+ if (!ab->hw_params.spectral.fft_sz) {
ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
ab->hw_rev);
return -EINVAL;
@@ -596,7 +594,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
/* convert Dword into bytes */
tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
- bin_len = tlv_len - (sizeof(*fft_report) - sizeof(*tlv));
+ bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len;
if (data_len < (bin_len + sizeof(*fft_report))) {
ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n",
@@ -604,12 +602,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
return -EINVAL;
}
- num_bins = bin_len / ATH11K_SPECTRAL_BIN_SIZE;
+ bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz;
+ num_bins = bin_len / bin_sz;
/* Only In-band bins are useful to user for visualize */
num_bins >>= 1;
- if (num_bins < ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS ||
- num_bins > ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS ||
+ if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS ||
+ num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) ||
!is_power_of_2(num_bins)) {
ath11k_warn(ab, "Invalid num of bins %d\n", num_bins);
return -EINVAL;
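A worked example for the bin accounting above, assuming fft_sz = 2 and fft_pad_sz = 2 (which reproduces the old fixed 4-byte bin): bin_len = 512 gives 128 raw bins and, after the >> 1, 64 in-band bins, a power of two within the allowed range.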
@@ -654,7 +653,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
fft_sample->freq2 = __cpu_to_be16(freq);
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
- ab->hw_params.spectral_fft_sz);
+ ab->hw_params.spectral.fft_sz);
fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
search.peak_mag,
@@ -690,7 +689,7 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
goto unlock;
}
- sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS;
+ sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab);
fft_sample = kmalloc(sample_sz, GFP_ATOMIC);
if (!fft_sample) {
ret = -ENOBUFS;
@@ -738,7 +737,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
* is 4 DWORD size (16 bytes).
* Need to remove this workaround once the HW bug is fixed
*/
- tlv_len = sizeof(*summary) - sizeof(*tlv);
+ tlv_len = sizeof(*summary) - sizeof(*tlv) +
+ ab->hw_params.spectral.summary_pad_sz;
if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) {
ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n",
@@ -901,7 +901,7 @@ static inline int ath11k_spectral_debug_register(struct ath11k *ar)
ar->spectral.rfs_scan = relay_open("spectral_scan",
ar->debug.debugfs_pdev,
- ATH11K_SPECTRAL_SUB_BUFF_SIZE,
+ ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab),
ATH11K_SPECTRAL_NUM_SUB_BUF,
&rfs_scan_cb, NULL);
if (!ar->spectral.rfs_scan) {
@@ -962,7 +962,7 @@ int ath11k_spectral_init(struct ath11k_base *ab)
ab->wmi_ab.svc_map))
return 0;
- if (!ab->hw_params.spectral_fft_sz)
+ if (!ab->hw_params.spectral.fft_sz)
return 0;
for (i = 0; i < ab->num_radios; i++) {
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index d2d2a3cb0826..25d18e9d5b0b 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -79,14 +79,15 @@ TRACE_EVENT(ath11k_htt_ppdu_stats,
);
TRACE_EVENT(ath11k_htt_rxdesc,
- TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+ TP_PROTO(struct ath11k *ar, const void *data, size_t log_type, size_t len),
- TP_ARGS(ar, data, len),
+ TP_ARGS(ar, data, log_type, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, len)
+ __field(u16, log_type)
__dynamic_array(u8, rxdesc, len)
),
@@ -94,14 +95,16 @@ TRACE_EVENT(ath11k_htt_rxdesc,
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->len = len;
+ __entry->log_type = log_type;
memcpy(__get_dynamic_array(rxdesc), data, len);
),
TP_printk(
- "%s %s rxdesc len %d",
+ "%s %s rxdesc len %d type %d",
__get_str(driver),
__get_str(device),
- __entry->len
+ __entry->len,
+ __entry->log_type
)
);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6c253eae9d06..2d0acfb748cf 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -360,6 +360,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ pdev_cap->nss_ratio_enabled =
+ WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
+ pdev_cap->nss_ratio_info =
+ WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
} else {
return -EINVAL;
}
@@ -403,18 +407,18 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
sizeof(struct ath11k_ppe_threshold));
- }
- cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
- cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
- cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
- cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
- cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
- cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
- memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
- sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
- memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
- sizeof(struct ath11k_ppe_threshold));
+ cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
return 0;
}
@@ -783,14 +787,26 @@ int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
struct wmi_vdev_start_req_arg *arg)
{
+ u32 center_freq1 = arg->channel.band_center_freq1;
+
memset(chan, 0, sizeof(*chan));
chan->mhz = arg->channel.freq;
chan->band_center_freq1 = arg->channel.band_center_freq1;
- if (arg->channel.mode == MODE_11AC_VHT80_80)
+
+ if (arg->channel.mode == MODE_11AX_HE160) {
+ if (arg->channel.freq > arg->channel.band_center_freq1)
+ chan->band_center_freq1 = center_freq1 + 40;
+ else
+ chan->band_center_freq1 = center_freq1 - 40;
+
+ chan->band_center_freq2 = arg->channel.band_center_freq1;
+
+ } else if (arg->channel.mode == MODE_11AC_VHT80_80) {
chan->band_center_freq2 = arg->channel.band_center_freq2;
- else
+ } else {
chan->band_center_freq2 = 0;
+ }
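
A worked example for the HE160 adjustment above: a 160 MHz block over channels 36-64 has band_center_freq1 = 5250 MHz. With the primary channel at 5180 MHz (5180 < 5250, so the else branch), firmware receives freq1 = 5250 - 40 = 5210 MHz, the center of the primary 80 MHz segment, and freq2 = 5250 MHz, the 160 MHz center.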
chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
if (arg->channel.passive)
@@ -868,6 +884,8 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
}
cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
ptr = skb->data + sizeof(*cmd);
chan = ptr;
@@ -1339,6 +1357,7 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->req_type = type;
+ cmd->pdev_id = ar->pdev->pdev_id;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
@@ -1903,8 +1922,8 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
FIELD_PREP(WMI_TLV_LEN,
sizeof(*he_mcs) - TLV_HDR_SIZE);
- he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
- he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
+ he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
ptr += sizeof(*he_mcs);
}
@@ -2285,7 +2304,7 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
u32 *reg1, *reg2;
- tchan_info = &chan_list->ch_param[0];
+ tchan_info = chan_list->ch_param;
while (chan_list->nallchans) {
len = sizeof(*cmd) + TLV_HDR_SIZE;
max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
@@ -3495,7 +3514,7 @@ ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
- wmi_cfg->flag1 = tg_cfg->atf_config;
+ wmi_cfg->flag1 = tg_cfg->flag1;
wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
wmi_cfg->sched_params = tg_cfg->sched_params;
wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
@@ -5234,9 +5253,11 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->hw_queued = src->hw_queued;
dst->hw_reaped = src->hw_reaped;
dst->underrun = src->underrun;
+ dst->hw_paused = src->hw_paused;
dst->tx_abort = src->tx_abort;
dst->mpdus_requeued = src->mpdus_requeued;
dst->tx_ko = src->tx_ko;
+ dst->tx_xretry = src->tx_xretry;
dst->data_rc = src->data_rc;
dst->self_triggers = src->self_triggers;
dst->sw_retry_failure = src->sw_retry_failure;
@@ -5247,6 +5268,16 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
dst->phy_underrun = src->phy_underrun;
dst->txop_ovf = src->txop_ovf;
+ dst->seq_posted = src->seq_posted;
+ dst->seq_failed_queueing = src->seq_failed_queueing;
+ dst->seq_completed = src->seq_completed;
+ dst->seq_restarted = src->seq_restarted;
+ dst->mu_seq_posted = src->mu_seq_posted;
+ dst->mpdus_sw_flush = src->mpdus_sw_flush;
+ dst->mpdus_hw_filter = src->mpdus_hw_filter;
+ dst->mpdus_truncated = src->mpdus_truncated;
+ dst->mpdus_ack_failed = src->mpdus_ack_failed;
+ dst->mpdus_expired = src->mpdus_expired;
}
static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
@@ -5266,6 +5297,7 @@ static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
dst->phy_errs = src->phy_errs;
dst->phy_err_drop = src->phy_err_drop;
dst->mpdu_errs = src->mpdu_errs;
+ dst->rx_ovfl_errs = src->rx_ovfl_errs;
}
static void
@@ -5503,11 +5535,15 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num underruns", pdev->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num HW Paused", pdev->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Excessive retries", pdev->tx_ko);
+ "PPDU OK", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
@@ -5531,6 +5567,26 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
"PHY underrun", pdev->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"MPDU is more than txop limit", pdev->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences posted", pdev->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num seq failed queueing ", pdev->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences completed ", pdev->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences restarted ", pdev->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MU sequences posted ", pdev->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS truncated ", pdev->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS expired ", pdev->mpdus_expired);
*length = len;
}
@@ -5575,6 +5631,8 @@ ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
"PHY errors drops", pdev->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Overflow errors", pdev->rx_ovfl_errs);
*length = len;
}
@@ -5792,6 +5850,17 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
pdev_idx = reg_info->phy_id;
+ /* Avoid default reg rule updates sent during FW recovery if
+ * the default regd is already available
+ */
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock(&ab->base_lock);
+ goto mem_free;
+ }
+ spin_unlock(&ab->base_lock);
+
if (pdev_idx >= ab->num_radios) {
/* Process the event for phy0 only if single_pdev_only
* is true. If pdev_idx is valid but not 0, discard the
@@ -5829,10 +5898,10 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
}
spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
- /* Once mac is registered, ar is valid and all CC events from
- * fw is considered to be received due to user requests
- * currently.
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from FW after WMI Init are used to build
+ * the default regd. From then on, any rules updated for
+ * the pdev could be due to user reg changes.
* Free previously built regd before assigning the newly
* generated regd to ar. NULL pointer handling will be
* taken care by kfree itself.
@@ -5842,13 +5911,9 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
ab->new_regd[pdev_idx] = regd;
ieee80211_queue_work(ar->hw, &ar->regd_update_work);
} else {
- /* Multiple events for the same *ar is not expected. But we
- * can still clear any previously stored default_regd if we
- * are receiving this event for the same radio by mistake.
- * NULL pointer handling will be taken care by kfree itself.
+ /* This regd would be applied during mac registration and is
+ * held constant throughout for regd intersection purposes
*/
- kfree(ab->default_regd[pdev_idx]);
- /* This regd would be applied during mac registration */
ab->default_regd[pdev_idx] = regd;
}
ab->dfs_region = reg_info->dfs_region;
@@ -6119,8 +6184,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
@@ -6141,8 +6208,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
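The explicit freq assignment for 6 GHz is needed because 6 GHz channel numbers reuse the 2.4/5 GHz numbering: channel 1 is 2412 MHz on 2.4 GHz but 5955 MHz on 6 GHz, so ieee80211_channel_to_frequency() cannot disambiguate and the chan_freq reported by firmware is used directly.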
@@ -6220,8 +6289,9 @@ exit:
rcu_read_unlock();
}
-static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
- u32 vdev_id)
+static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
+ u32 vdev_id,
+ enum ath11k_scan_state state)
{
int i;
struct ath11k_pdev *pdev;
@@ -6233,7 +6303,7 @@ static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
ar = pdev->ar;
spin_lock_bh(&ar->data_lock);
- if (ar->scan.state == ATH11K_SCAN_ABORTING &&
+ if (ar->scan.state == state &&
ar->scan.vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
@@ -6263,10 +6333,15 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
* aborting scan's vdev id matches this event info.
*/
if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
- scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
- ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
- else
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_ABORTING);
+ if (!ar)
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_RUNNING);
+ } else {
ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+ }
if (!ar) {
ath11k_warn(ab, "Received scan event for unknown vdev");
@@ -6301,6 +6376,8 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
ath11k_wmi_event_scan_start_failed(ar);
break;
case WMI_SCAN_EVENT_DEQUEUED:
+ __ath11k_mac_scan_finish(ar);
+ break;
case WMI_SCAN_EVENT_PREEMPTED:
case WMI_SCAN_EVENT_RESTARTED:
case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
@@ -7065,6 +7142,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
break;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index d35c47e0b19d..0584e68e7593 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -119,6 +119,22 @@ enum {
WMI_HOST_WLAN_2G_5G_CAP = 0x3,
};
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+ /* HE LTF related configuration */
+ WMI_HE_AUTORATE_LTF_1X = BIT(0),
+ WMI_HE_AUTORATE_LTF_2X = BIT(1),
+ WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+ /* HE GI related configuration */
+ WMI_AUTORATE_400NS_GI = BIT(8),
+ WMI_AUTORATE_800NS_GI = BIT(9),
+ WMI_AUTORATE_1600NS_GI = BIT(10),
+ WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
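The LTF bits occupy the low byte and the GI bits start at bit 8, so a complete configuration is a plain OR of choices from each group. A hypothetical sketch of enabling 2x LTF with 1.6 us GI; the helper is ath11k's usual vdev-param setter, but treat the exact call site as an assumption:

	u32 autorate_cfg = WMI_HE_AUTORATE_LTF_2X | WMI_AUTORATE_1600NS_GI;

	/* hypothetical call site; sketch only */
	ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
					    WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
					    autorate_cfg);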
/*
* wmi command groups.
*/
@@ -647,6 +663,9 @@ enum wmi_tlv_event_id {
WMI_PEER_RESERVED9_EVENTID,
WMI_PEER_RESERVED10_EVENTID,
WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_PEER_TX_PN_RESPONSE_EVENTID,
+ WMI_PEER_CFR_CAPTURE_EVENTID,
+ WMI_PEER_CREATE_CONF_EVENTID,
WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
WMI_HOST_SWBA_EVENTID,
WMI_TBTTOFFSET_UPDATE_EVENTID,
@@ -1044,7 +1063,9 @@ enum wmi_tlv_vdev_param {
WMI_VDEV_PARAM_HE_RANGE_EXT,
WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_HE_LTF = 0x74,
WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
@@ -2128,6 +2149,24 @@ enum wmi_direct_buffer_module {
WMI_DIRECT_BUF_MAX
};
+/* enum wmi_nss_ratio - NSS ratio received from FW during service ready ext
+ * event
+ * WMI_NSS_RATIO_1BY2_NSS - max NSS at 160 MHz equals half the max NSS
+ * at 80 MHz
+ * WMI_NSS_RATIO_3BY4_NSS - max NSS at 160 MHz equals 3/4 of the max NSS
+ * at 80 MHz
+ * WMI_NSS_RATIO_1_NSS - max NSS at 160 MHz equals the max NSS at 80 MHz
+ * WMI_NSS_RATIO_2_NSS - max NSS at 160 MHz equals twice the max NSS
+ * at 80 MHz
+ */
+enum wmi_nss_ratio {
+ WMI_NSS_RATIO_1BY2_NSS = 0x0,
+ WMI_NSS_RATIO_3BY4_NSS = 0x1,
+ WMI_NSS_RATIO_1_NSS = 0x2,
+ WMI_NSS_RATIO_2_NSS = 0x3,
+};
+
struct wmi_host_pdev_band_to_mac {
u32 pdev_id;
u32 start_freq;
@@ -2244,6 +2283,8 @@ struct wmi_init_cmd {
u32 num_host_mem_chunks;
} __packed;
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+
struct wmi_resource_config {
u32 tlv_header;
u32 num_vdevs;
@@ -2370,6 +2411,12 @@ struct wmi_hw_mode_capabilities {
} __packed;
#define WMI_MAX_HECAP_PHY_SIZE (3)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS BIT(0)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val)
+#define WMI_NSS_RATIO_INFO_BITPOS GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val)
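Despite the _BITPOS names, both macros are FIELD_GET masks: bit 0 flags whether the ratio is valid and bits 4:1 carry a wmi_nss_ratio value. A minimal decode sketch with a hypothetical helper name, assuming the field comes from the wmi_mac_phy_capabilities struct below:

	/* hypothetical helper, sketch only */
	static bool ath11k_nss_ratio_valid(u32 nss_ratio, enum wmi_nss_ratio *out)
	{
		if (!WMI_NSS_RATIO_ENABLE_DISABLE_GET(nss_ratio))
			return false;

		*out = WMI_NSS_RATIO_INFO_GET(nss_ratio);
		return true;
	}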
struct wmi_mac_phy_capabilities {
u32 hw_mode_id;
@@ -2403,6 +2450,12 @@ struct wmi_mac_phy_capabilities {
u32 he_cap_info_2g_ext;
u32 he_cap_info_5g_ext;
u32 he_cap_info_internal;
+ u32 wireless_modes;
+ u32 low_2ghz_chan_freq;
+ u32 high_2ghz_chan_freq;
+ u32 low_5ghz_chan_freq;
+ u32 high_5ghz_chan_freq;
+ u32 nss_ratio;
} __packed;
struct wmi_hal_reg_capabilities_ext {
@@ -2527,6 +2580,7 @@ struct wmi_vdev_down_cmd {
#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
#define WMI_VDEV_START_PMF_ENABLED BIT(1)
#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4)
struct wmi_ssid {
u32 ssid_len;
@@ -2960,6 +3014,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
u32 tlv_header;
/* ref wmi_bss_chan_info_req_type */
u32 req_type;
+ u32 pdev_id;
} __packed;
struct wmi_ap_ps_peer_cmd {
@@ -3608,7 +3663,7 @@ struct wmi_stop_scan_cmd {
struct scan_chan_list_params {
u32 pdev_id;
u16 nallchans;
- struct channel_param ch_param[1];
+ struct channel_param ch_param[];
};
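With ch_param[1] the struct carried a fake one-element tail and callers had to subtract it back out of their size math; a true flexible array lets them use struct_size(), which also guards against multiplication overflow. A hedged allocation sketch:

	struct scan_chan_list_params *params;

	params = kzalloc(struct_size(params, ch_param, nallchans), GFP_KERNEL);
	if (!params)
		return -ENOMEM;
	params->nallchans = nallchans;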
struct wmi_scan_chan_list_cmd {
@@ -3917,7 +3972,11 @@ struct wmi_vht_rate_set {
struct wmi_he_rate_set {
u32 tlv_header;
+
+ /* MCS at which the peer can receive */
u32 rx_mcs_set;
+
+ /* MCS at which the peer can transmit */
u32 tx_mcs_set;
} __packed;
@@ -4056,7 +4115,6 @@ struct wmi_vdev_stopped_event {
} __packed;
struct wmi_pdev_bss_chan_info_event {
- u32 pdev_id;
u32 freq; /* Units in MHz */
u32 noise_floor; /* units are dBm */
/* rx clear - how often the channel was unused */
@@ -4074,6 +4132,7 @@ struct wmi_pdev_bss_chan_info_event {
/*rx_cycle cnt for my bss in 64bits format */
u32 rx_bss_cycle_count_low;
u32 rx_bss_cycle_count_high;
+ u32 pdev_id;
} __packed;
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
@@ -4168,6 +4227,9 @@ struct wmi_pdev_stats_tx {
/* Num underruns */
s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
+
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
@@ -4177,6 +4239,8 @@ struct wmi_pdev_stats_tx {
/* excessive retries */
u32 tx_ko;
+ u32 tx_xretry;
+
/* data hw rate code */
u32 data_rc;
@@ -4206,6 +4270,40 @@ struct wmi_pdev_stats_tx {
/* MPDU is more than txop limit */
u32 txop_ovf;
+
+ /* Num sequences posted */
+ u32 seq_posted;
+
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+
+ /* Num sequences completed */
+ u32 seq_completed;
+
+ /* Num sequences restarted */
+ u32 seq_restarted;
+
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+
+ /* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+ s32 mpdus_hw_filter;
+
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+
+ /* Num MPDUs that were dropped due to expiry */
+ s32 mpdus_expired;
} __packed;
struct wmi_pdev_stats_rx {
@@ -4240,6 +4338,9 @@ struct wmi_pdev_stats_rx {
/* Number of mpdu errors - FCS, MIC, ENC etc. */
s32 mpdu_errs;
+
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
} __packed;
struct wmi_pdev_stats {
@@ -5014,7 +5115,7 @@ struct target_resource_config {
u32 vo_minfree;
u32 rx_batchmode;
u32 tt_support;
- u32 atf_config;
+ u32 flag1;
u32 iphdr_pad_config;
u32 qwrap_config:16,
alloc_frag_desc_for_data_pkt:16;
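Renaming atf_config to flag1 reflects that firmware reads this word as a bitmap, of which the new BIT(5) requests 64-bit BSS channel-info counters (matching the pdev_id-bearing event layout above). A hedged sketch of setting it while the host fills its resource config:

	/* hypothetical init-time hook; the value is copied into
	 * wmi_resource_config before WMI init
	 */
	config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;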
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 56d1a7764b9f..708c8969b503 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -19,9 +19,14 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/workqueue.h>
struct owl_ctx {
+ struct pci_dev *pdev;
struct completion eeprom_load;
+ struct work_struct work;
+ struct nvmem_cell *cell;
};
#define EEPROM_FILENAME_LEN 100
@@ -42,6 +47,12 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
u32 bar0;
bool swap_needed = false;
+ /* also note that we are doing *u16 operations on the data */
+ if (cal_len > 4096 || cal_len < 0x200 || (cal_len & 1) == 1) {
+ dev_err(&pdev->dev, "eeprom has an invalid size.\n");
+ return -EINVAL;
+ }
+
if (*cal_data != AR5416_EEPROM_MAGIC) {
if (*cal_data != swab16(AR5416_EEPROM_MAGIC)) {
dev_err(&pdev->dev, "invalid calibration data\n");
@@ -99,38 +110,31 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
return 0;
}
-static void owl_fw_cb(const struct firmware *fw, void *context)
+static void owl_rescan(struct pci_dev *pdev)
{
- struct pci_dev *pdev = (struct pci_dev *)context;
- struct owl_ctx *ctx = (struct owl_ctx *)pci_get_drvdata(pdev);
- struct pci_bus *bus;
-
- complete(&ctx->eeprom_load);
-
- if (!fw) {
- dev_err(&pdev->dev, "no eeprom data received.\n");
- goto release;
- }
-
- /* also note that we are doing *u16 operations on the file */
- if (fw->size > 4096 || fw->size < 0x200 || (fw->size & 1) == 1) {
- dev_err(&pdev->dev, "eeprom file has an invalid size.\n");
- goto release;
- }
-
- if (ath9k_pci_fixup(pdev, (const u16 *)fw->data, fw->size))
- goto release;
+ struct pci_bus *bus = pdev->bus;
pci_lock_rescan_remove();
- bus = pdev->bus;
pci_stop_and_remove_bus_device(pdev);
/* the device should come back with the proper
* ProductId. But we have to initiate a rescan.
*/
pci_rescan_bus(bus);
pci_unlock_rescan_remove();
+}
+
+static void owl_fw_cb(const struct firmware *fw, void *context)
+{
+ struct owl_ctx *ctx = (struct owl_ctx *)context;
+
+ complete(&ctx->eeprom_load);
-release:
+ if (fw) {
+ ath9k_pci_fixup(ctx->pdev, (const u16 *)fw->data, fw->size);
+ owl_rescan(ctx->pdev);
+ } else {
+ dev_err(&ctx->pdev->dev, "no eeprom data received.\n");
+ }
release_firmware(fw);
}
@@ -152,6 +156,43 @@ static const char *owl_get_eeprom_name(struct pci_dev *pdev)
return eeprom_name;
}
+static void owl_nvmem_work(struct work_struct *work)
+{
+ struct owl_ctx *ctx = container_of(work, struct owl_ctx, work);
+ void *buf;
+ size_t len;
+
+ complete(&ctx->eeprom_load);
+
+ buf = nvmem_cell_read(ctx->cell, &len);
+ if (!IS_ERR(buf)) {
+ ath9k_pci_fixup(ctx->pdev, buf, len);
+ kfree(buf);
+ owl_rescan(ctx->pdev);
+ } else {
+ dev_err(&ctx->pdev->dev, "no nvmem data received.\n");
+ }
+}
+
+static int owl_nvmem_probe(struct owl_ctx *ctx)
+{
+ int err;
+
+ ctx->cell = devm_nvmem_cell_get(&ctx->pdev->dev, "calibration");
+ if (IS_ERR(ctx->cell)) {
+ err = PTR_ERR(ctx->cell);
+ if (err == -ENOENT || err == -EOPNOTSUPP)
+ return 1; /* not present, try firmware_request */
+
+ return err;
+ }
+
+ INIT_WORK(&ctx->work, owl_nvmem_work);
+ schedule_work(&ctx->work);
+
+ return 0;
+}
+
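devm_nvmem_cell_get()/nvmem_cell_read() is the stock consumer pattern used here: the cell handle is devres-managed, but the buffer nvmem_cell_read() returns is kmalloc'd and owned by the caller. A minimal standalone sketch using the same "calibration" cell name:

	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = devm_nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell); /* e.g. -ENOENT when not wired up in DT */

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... consume buf[0..len) ... */
	kfree(buf);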
static int owl_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -164,21 +205,27 @@ static int owl_probe(struct pci_dev *pdev,
pcim_pin_device(pdev);
- eeprom_name = owl_get_eeprom_name(pdev);
- if (!eeprom_name) {
- dev_err(&pdev->dev, "no eeprom filename found.\n");
- return -ENODEV;
- }
-
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
init_completion(&ctx->eeprom_load);
+ ctx->pdev = pdev;
pci_set_drvdata(pdev, ctx);
+
+ err = owl_nvmem_probe(ctx);
+ if (err <= 0)
+ return err;
+
+ eeprom_name = owl_get_eeprom_name(pdev);
+ if (!eeprom_name) {
+ dev_err(&pdev->dev, "no eeprom filename found.\n");
+ return -ENODEV;
+ }
+
err = request_firmware_nowait(THIS_MODULE, true, eeprom_name,
- &pdev->dev, GFP_KERNEL, pdev, owl_fw_cb);
+ &pdev->dev, GFP_KERNEL, ctx, owl_fw_cb);
if (err)
dev_err(&pdev->dev, "failed to request caldata (%d).\n", err);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index c22d457dbc54..e6b3cd49ea18 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -135,13 +135,23 @@ static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob,
offset, data);
}
+static bool ath9k_hw_nvram_read_nvmem(struct ath_hw *ah, off_t offset,
+ u16 *data)
+{
+ return ath9k_hw_nvram_read_array(ah->nvmem_blob,
+ ah->nvmem_blob_len / sizeof(u16),
+ offset, data);
+}
+
bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_platform_data *pdata = ah->dev->platform_data;
bool ret;
- if (ah->eeprom_blob)
+ if (ah->nvmem_blob)
+ ret = ath9k_hw_nvram_read_nvmem(ah, off, data);
+ else if (ah->eeprom_blob)
ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
else if (pdata && !pdata->use_eeprom)
ret = ath9k_hw_nvram_read_pdata(pdata, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b7b65b1c90e8..096a206f49ed 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -977,6 +977,8 @@ struct ath_hw {
bool disable_5ghz;
const struct firmware *eeprom_blob;
+ u16 *nvmem_blob; /* devres managed */
+ size_t nvmem_blob_len;
struct ath_dynack dynack;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index e9a36dd7144f..1568730fc01e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/nvmem-consumer.h>
#include <linux/relay.h>
#include <linux/dmi.h>
#include <net/ieee80211_radiotap.h>
@@ -568,6 +569,57 @@ static void ath9k_eeprom_release(struct ath_softc *sc)
release_firmware(sc->sc_ah->eeprom_blob);
}
+static int ath9k_nvmem_request_eeprom(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+ int err;
+
+ cell = devm_nvmem_cell_get(sc->dev, "calibration");
+ if (IS_ERR(cell)) {
+ err = PTR_ERR(cell);
+
+ /* nvmem cell might not be defined, or the nvmem
+ * subsystem isn't included. In this case, follow
+ * the established "just return 0;" convention of
+ * ath9k_init_platform to say:
+ * "All good. Nothing to see here. Please go on."
+ */
+ if (err == -ENOENT || err == -EOPNOTSUPP)
+ return 0;
+
+ return err;
+ }
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* Run basic sanity checks on the returned nvmem cell length.
+ * The length has to be a multiple of u16 (hence the "& 1" check),
+ * larger than 512 bytes and smaller than the maximum of
+ * AR9300_EEPROM_SIZE (16 KiB).
+ */
+ if ((len & 1) == 1 || len < 512 || len >= AR9300_EEPROM_SIZE) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ /* devres manages the calibration values release on shutdown */
+ ah->nvmem_blob = (u16 *)devm_kmemdup(sc->dev, buf, len, GFP_KERNEL);
+ kfree(buf);
+ /* devm_kmemdup() returns NULL on failure, not an ERR_PTR */
+ if (!ah->nvmem_blob)
+ return -ENOMEM;
+
+ ah->nvmem_blob_len = len;
+ ah->ah_flags &= ~AH_USE_EEPROM;
+ ah->ah_flags |= AH_NO_EEP_SWAP;
+
+ return 0;
+}
+
static int ath9k_init_platform(struct ath_softc *sc)
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
@@ -704,6 +756,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
return ret;
+ ret = ath9k_nvmem_request_eeprom(sc);
+ if (ret)
+ return ret;
+
if (ath9k_led_active_high != -1)
ah->config.led_active_high = ath9k_led_active_high == 1;
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h
index 9c2e5458e425..e14f374f97d4 100644
--- a/drivers/net/wireless/ath/spectral_common.h
+++ b/drivers/net/wireless/ath/spectral_common.h
@@ -24,7 +24,6 @@
* could be acquired so far.
*/
#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
-#define SPECTRAL_ATH11K_MAX_NUM_BINS 512
/* FFT sample format given to userspace via debugfs.
*
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index 389b5e7129a6..6af306ae41ad 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -120,7 +120,7 @@ static ssize_t write_file_dump(struct file *file,
if (begin == NULL)
break;
- if (kstrtou32(begin, 0, &arg[i]) != 0)
+ if (kstrtos32(begin, 0, &arg[i]) != 0)
break;
}
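kstrtou32() rejects a leading minus sign outright, so negative dump arguments never parsed; kstrtos32() accepts them, matching the signed storage the values end up in. For instance:

	s32 val;

	/* kstrtou32("-1", 0, ...) returns -EINVAL; the signed parse works */
	if (kstrtos32("-1", 0, &val) == 0)
		pr_debug("parsed %d\n", val); /* -1 */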
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 455143c4164e..5f1f2480459a 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -3384,11 +3384,11 @@ struct tl_hal_flush_ac_rsp_msg {
struct wcn36xx_hal_enter_imps_req_msg {
struct wcn36xx_hal_msg_header header;
-};
+} __packed;
-struct wcn36xx_hal_exit_imps_req {
+struct wcn36xx_hal_exit_imps_req_msg {
struct wcn36xx_hal_msg_header header;
-};
+} __packed;
struct wcn36xx_hal_enter_bmps_req_msg {
struct wcn36xx_hal_msg_header header;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index ec913ec991f3..263af65a889a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -432,6 +432,13 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_PS)
wcn36xx_change_ps(wcn, hw->conf.flags & IEEE80211_CONF_PS);
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ wcn36xx_smd_enter_imps(wcn);
+ else
+ wcn36xx_smd_exit_imps(wcn);
+ }
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -569,12 +576,14 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
sta_priv->is_data_encrypted = true;
/* Reconfigure bss with encrypt_type */
- if (NL80211_IFTYPE_STATION == vif->type)
+ if (NL80211_IFTYPE_STATION == vif->type) {
wcn36xx_smd_config_bss(wcn,
vif,
sta,
sta->addr,
true);
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 57fa857b290b..3979171c92dd 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2184,6 +2184,59 @@ out:
return ret;
}
+int wcn36xx_smd_enter_imps(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_enter_imps_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_IMPS_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_imps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_imps response failed err=%d\n", ret);
+ goto out;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Entered idle mode\n");
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_imps(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_exit_imps_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_IMPS_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_imps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_imps response failed err=%d\n", ret);
+ goto out;
+ }
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Exited idle mode\n");
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
@@ -2623,30 +2676,52 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
size_t len)
{
struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
- struct wcn36xx_vif *tmp;
+ struct wcn36xx_vif *vif_priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *bss_conf;
struct ieee80211_sta *sta;
+ bool found = false;
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted delete sta indication\n");
return -EIO;
}
- wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n",
- rsp->addr2, rsp->sta_id);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d reason %d\n",
+ rsp->addr2, rsp->sta_id, rsp->reason_code);
- list_for_each_entry(tmp, &wcn->vif_list, list) {
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
rcu_read_lock();
- sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
- if (sta)
- ieee80211_report_low_ack(sta, 0);
+ vif = wcn36xx_priv_to_vif(vif_priv);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /* We could call ieee80211_find_sta too, but checking
+ * bss_conf is clearer.
+ */
+ bss_conf = &vif->bss_conf;
+ if (vif_priv->sta_assoc &&
+ !memcmp(bss_conf->bssid, rsp->addr2, ETH_ALEN)) {
+ found = true;
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "connection loss bss_index %d\n",
+ vif_priv->bss_index);
+ ieee80211_connection_loss(vif);
+ }
+ } else {
+ sta = ieee80211_find_sta(vif, rsp->addr2);
+ if (sta) {
+ found = true;
+ ieee80211_report_low_ack(sta, 0);
+ }
+ }
+
rcu_read_unlock();
- if (sta)
+ if (found)
return 0;
}
- wcn36xx_warn("STA with addr %pM and index %d not found\n",
- rsp->addr2,
- rsp->sta_id);
+ wcn36xx_warn("BSS or STA with addr %pM not found\n", rsp->addr2);
return -ENOENT;
}
@@ -3060,6 +3135,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_GTK_OFFLOAD_RSP:
case WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP:
case WCN36XX_HAL_HOST_RESUME_RSP:
+ case WCN36XX_HAL_ENTER_IMPS_RSP:
+ case WCN36XX_HAL_EXIT_IMPS_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index d8bded03945d..5f98c1d01ae4 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -163,4 +163,7 @@ int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn);
int wcn36xx_smd_host_resume(struct wcn36xx *wcn);
+int wcn36xx_smd_enter_imps(struct wcn36xx *wcn);
+int wcn36xx_smd_exit_imps(struct wcn36xx *wcn);
+
#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 9db12ffd2ff8..fb727778312c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1783,8 +1783,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
val = WPA_AUTH_PSK;
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1816,8 +1816,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
profile->is_ft = true;
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
} else if (val & WPA3_AUTH_SAE_PSK) {
@@ -1838,8 +1838,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
}
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 6d5188b78f2d..0af452dca766 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -76,6 +76,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* Cyberbook T116 rugged tablet */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "20170531"),
+ },
+ /* The factory image nvram file is identical to the ACEPC T8 one */
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Match for the GPDwin which unfortunately uses somewhat
* generic dmi strings, which is why we test for 4 strings.
* Comparing against 23 other byt/cht boards, board_vendor
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 2f7bc3a70c65..513c7e6421b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -29,7 +29,7 @@ static int brcmf_of_get_country_codes(struct device *dev,
return (count == -EINVAL) ? 0 : count;
}
- cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL);
+ cc = devm_kzalloc(dev, struct_size(cc, table, count), GFP_KERNEL);
if (!cc)
return -ENOMEM;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ada6ce32c1f1..9a99f482c84a 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3777,7 +3777,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
&q->q.dma_addr, GFP_KERNEL);
if (!q->bd) {
- IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
+ IPW_ERROR("dma_alloc_coherent(%zd) failed\n",
sizeof(q->bd[0]) * count);
kfree(q->txb);
q->txb = NULL;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 0961f4a5e415..d62a20de3ada 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -908,16 +908,20 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_STA;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_P2P_GO:
- priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_AP:
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
break;
default:
mwifiex_dbg(adapter, ERROR,
@@ -939,6 +943,117 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
return 0;
}
+static bool
+is_vif_type_change_allowed(struct mwifiex_adapter *adapter,
+ enum nl80211_iftype old_iftype,
+ enum nl80211_iftype new_iftype)
+{
+ switch (old_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_STATION:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_AP:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return adapter->curr_iface_comb.sta_intf !=
+ adapter->iface_limit.sta_intf;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_GO:
+ return true;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_P2P_GO:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return true;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void
+update_vif_type_counter(struct mwifiex_adapter *adapter,
+ enum nl80211_iftype iftype,
+ int change)
+{
+ switch (iftype) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ adapter->curr_iface_comb.sta_intf += change;
+ break;
+ case NL80211_IFTYPE_AP:
+ adapter->curr_iface_comb.uap_intf += change;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ adapter->curr_iface_comb.p2p_intf += change;
+ break;
+ default:
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Unsupported iftype passed: %d\n",
+ __func__, iftype);
+ break;
+ }
+}
+
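Every interface-count adjustment now funnels through this helper, with NL80211_IFTYPE_UNSPECIFIED counted against the station limit. A type change is the paired call the change_vif helpers below use:

	update_vif_type_counter(adapter, curr_iftype, -1);
	update_vif_type_counter(adapter, type, +1);
	dev->ieee80211_ptr->iftype = type;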
static int
mwifiex_change_vif_to_p2p(struct net_device *dev,
enum nl80211_iftype curr_iftype,
@@ -955,13 +1070,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
adapter = priv->adapter;
- if (adapter->curr_iface_comb.p2p_intf ==
- adapter->iface_limit.p2p_intf) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple P2P ifaces\n");
- return -1;
- }
-
mwifiex_dbg(adapter, INFO,
"%s: changing role to p2p\n", dev->name);
@@ -970,6 +1078,10 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
switch (type) {
case NL80211_IFTYPE_P2P_CLIENT:
if (mwifiex_cfg80211_init_p2p_client(priv))
@@ -993,21 +1105,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.p2p_intf++;
- dev->ieee80211_ptr->iftype = type;
-
return 0;
}
@@ -1027,15 +1124,6 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
adapter = priv->adapter;
- if ((curr_iftype != NL80211_IFTYPE_P2P_CLIENT &&
- curr_iftype != NL80211_IFTYPE_P2P_GO) &&
- (adapter->curr_iface_comb.sta_intf ==
- adapter->iface_limit.sta_intf)) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple station/adhoc ifaces\n");
- return -1;
- }
-
if (type == NL80211_IFTYPE_STATION)
mwifiex_dbg(adapter, INFO,
"%s: changing role to station\n", dev->name);
@@ -1047,26 +1135,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
return -1;
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.sta_intf++;
- dev->ieee80211_ptr->iftype = type;
return 0;
}
@@ -1086,13 +1165,6 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
adapter = priv->adapter;
- if (adapter->curr_iface_comb.uap_intf ==
- adapter->iface_limit.uap_intf) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple AP ifaces\n");
- return -1;
- }
-
mwifiex_dbg(adapter, INFO,
"%s: changing role to AP\n", dev->name);
@@ -1100,27 +1172,17 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
return -1;
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.uap_intf++;
- dev->ieee80211_ptr->iftype = type;
return 0;
}
/*
@@ -1141,6 +1203,27 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return -EBUSY;
}
+ if (type == NL80211_IFTYPE_UNSPECIFIED) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: no new type specified, keeping old type %d\n",
+ dev->name, curr_iftype);
+ return 0;
+ }
+
+ if (curr_iftype == type) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: interface already is of type %d\n",
+ dev->name, curr_iftype);
+ return 0;
+ }
+
+ if (!is_vif_type_change_allowed(priv->adapter, curr_iftype, type)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: change from type %d to %d is not allowed\n",
+ dev->name, curr_iftype, type);
+ return -EOPNOTSUPP;
+ }
+
switch (curr_iftype) {
case NL80211_IFTYPE_ADHOC:
switch (type) {
@@ -1160,19 +1243,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as IBSS\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_STATION:
switch (type) {
case NL80211_IFTYPE_ADHOC:
@@ -1191,22 +1265,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as STA\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_STATION: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_AP:
switch (type) {
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
break;
@@ -1214,69 +1280,60 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
type, params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as AP\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_AP: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+
switch (type) {
- case NL80211_IFTYPE_STATION:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
- priv->adapter->curr_iface_comb.p2p_intf--;
- priv->adapter->curr_iface_comb.sta_intf++;
- dev->ieee80211_ptr->iftype = type;
- if (mwifiex_deinit_priv_params(priv))
- return -1;
- if (mwifiex_init_new_priv_params(priv, dev, type))
- return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
- return -1;
- break;
case NL80211_IFTYPE_ADHOC:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
+ case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
- break;
+ case NL80211_IFTYPE_P2P_GO:
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, params);
case NL80211_IFTYPE_AP:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as P2P\n", dev->name);
- fallthrough;
+ default:
+ goto errnotsupp;
+ }
+
+ case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
+ type, params);
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- return 0;
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, params);
+ case NL80211_IFTYPE_AP:
+ return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
+ params);
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: unknown iftype: %d\n",
- dev->name, dev->ieee80211_ptr->iftype);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
return 0;
+
+errnotsupp:
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unsupported interface type transition: %d to %d\n",
+ curr_iftype, type);
+ return -EOPNOTSUPP;
}
static void
@@ -2997,7 +3054,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_started = 0;
@@ -3108,23 +3165,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_dev_debugfs_init(priv);
#endif
- switch (type) {
- case NL80211_IFTYPE_UNSPECIFIED:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf++;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf++;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- adapter->curr_iface_comb.p2p_intf++;
- break;
- default:
- /* This should be dead code; checked above */
- mwifiex_dbg(adapter, ERROR, "type not supported\n");
- return ERR_PTR(-EINVAL);
- }
+ update_vif_type_counter(adapter, type, +1);
return &priv->wdev;
@@ -3190,24 +3231,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
/* Clear the priv in adapter */
priv->netdev = NULL;
- switch (priv->bss_mode) {
- case NL80211_IFTYPE_UNSPECIFIED:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- default:
- mwifiex_dbg(adapter, ERROR,
- "del_virtual_intf: type not supported\n");
- break;
- }
+ update_vif_type_counter(adapter, priv->bss_mode, -1);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 96973ec7bd9a..dc4bfe7be378 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -129,8 +129,7 @@ static void cfg_scan_result(enum scan_event scan_event,
info->frame_len,
(s32)info->rssi * 100,
GFP_KERNEL);
- if (!bss)
- cfg80211_put_bss(wiphy, bss);
+ cfg80211_put_bss(wiphy, bss);
} else if (scan_event == SCAN_EVENT_DONE) {
mutex_lock(&priv->scan_req_lock);
@@ -729,6 +728,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
{
struct wilc_vif *vif = netdev_priv(dev);
struct wilc_priv *priv = &vif->priv;
+ struct wilc *wilc = vif->wilc;
u32 i = 0;
u32 associatedsta = ~0;
u32 inactive_time = 0;
@@ -755,6 +755,9 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
} else if (vif->iftype == WILC_STATION_MODE) {
struct rf_info stats;
+ if (!wilc->initialized)
+ return -EBUSY;
+
wilc_get_statistics(vif, &stats);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL) |
@@ -1581,6 +1584,7 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
+ wilc_set_wowlan_trigger(vif, enabled);
srcu_read_unlock(&wl->srcu, srcu_idx);
}
@@ -1683,6 +1687,7 @@ static void wlan_init_locks(struct wilc *wl)
mutex_init(&wl->rxq_cs);
mutex_init(&wl->cfg_cmd_lock);
mutex_init(&wl->vif_mutex);
+ mutex_init(&wl->deinit_lock);
spin_lock_init(&wl->txq_spinlock);
mutex_init(&wl->txq_add_to_head_cs);
@@ -1701,6 +1706,7 @@ void wlan_deinit_locks(struct wilc *wilc)
mutex_destroy(&wilc->cfg_cmd_lock);
mutex_destroy(&wilc->txq_add_to_head_cs);
mutex_destroy(&wilc->vif_mutex);
+ mutex_destroy(&wilc->deinit_lock);
cleanup_srcu_struct(&wilc->srcu);
}
@@ -1724,7 +1730,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
*wilc = wl;
wl->io_type = io_type;
wl->hif_func = ops;
- wl->chip_ps_state = WILC_CHIP_WAKEDUP;
for (i = 0; i < NQUEUES; i++)
INIT_LIST_HEAD(&wl->txq[i].txq_head.list);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index a133736a7821..e69b9c7f3d31 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -23,6 +23,10 @@ struct wilc_set_multicast {
u8 *mc_list;
};
+struct host_if_wowlan_trigger {
+ u8 wowlan_trigger;
+};
+
struct wilc_del_all_sta {
u8 assoc_sta;
u8 mac[WILC_MAX_NUM_STA][ETH_ALEN];
@@ -34,6 +38,7 @@ union wilc_message_body {
struct wilc_set_multicast mc_info;
struct wilc_remain_ch remain_on_ch;
char *data;
+ struct host_if_wowlan_trigger wow_trigger;
};
struct host_if_msg {
@@ -962,6 +967,25 @@ error:
kfree(msg);
}
+void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled)
+{
+ int ret;
+ struct wid wid;
+ u8 wowlan_trigger = 0;
+
+ if (enabled)
+ wowlan_trigger = 1;
+
+ wid.id = WID_WOWLAN_TRIGGER;
+ wid.type = WID_CHAR;
+ wid.val = &wowlan_trigger;
+ wid.size = sizeof(char);
+
+ ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+ if (ret)
+ pr_err("Failed to send wowlan trigger config packet\n");
+}
+
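WILC configuration travels as WID (WLAN identifier) records, and the trigger above is just a one-byte WID_CHAR write; the same shape works for any char-sized WID. Sketched generically:

	u8 val = 1;
	struct wid wid = {
		.id = WID_WOWLAN_TRIGGER,
		.type = WID_CHAR,
		.val = &val,
		.size = sizeof(val),
	};

	if (wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1))
		pr_err("wid write failed\n");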
static void handle_scan_timer(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
@@ -1494,7 +1518,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
{
struct host_if_drv *hif_drv;
struct wilc_vif *vif = netdev_priv(dev);
- struct wilc *wilc = vif->wilc;
hif_drv = kzalloc(sizeof(*hif_drv), GFP_KERNEL);
if (!hif_drv)
@@ -1504,9 +1527,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
vif->hif_drv = hif_drv;
- if (wilc->clients_count == 0)
- mutex_init(&wilc->deinit_lock);
-
timer_setup(&vif->periodic_rssi, get_periodic_rssi, 0);
mod_timer(&vif->periodic_rssi, jiffies + msecs_to_jiffies(5000));
@@ -1518,8 +1538,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
hif_drv->p2p_timeout = 0;
- wilc->clients_count++;
-
return 0;
}
@@ -1550,7 +1568,6 @@ int wilc_deinit(struct wilc_vif *vif)
kfree(hif_drv);
vif->hif_drv = NULL;
- vif->wilc->clients_count--;
mutex_unlock(&vif->wilc->deinit_lock);
return result;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 58811911213b..cccd54ed0518 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -207,6 +207,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
int wilc_get_vif_idx(struct wilc_vif *vif);
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
+void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled);
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 86209b391a3d..79f73a72da57 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -264,9 +264,7 @@ struct wilc {
struct device *dev;
bool suspend_event;
- int clients_count;
struct workqueue_struct *hif_workqueue;
- enum chip_ps_states chip_ps_state;
struct wilc_cfg cfg;
void *bus_data;
struct net_device *monitor_dev;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 42e03a701ae1..26ebf6664342 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -978,6 +978,7 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.hif_sync_ext = wilc_sdio_sync_ext,
.enable_interrupt = wilc_sdio_enable_interrupt,
.disable_interrupt = wilc_sdio_disable_interrupt,
+ .hif_reset = wilc_sdio_reset,
};
static int wilc_sdio_resume(struct device *dev)
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index dd481dc0b5ce..640850f989dd 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -47,6 +47,8 @@ struct wilc_spi {
static const struct wilc_hif_func wilc_hif_spi;
+static int wilc_spi_reset(struct wilc *wilc);
+
/********************************************
*
* Spi protocol Function
@@ -144,6 +146,12 @@ struct wilc_spi_rsp_data {
u8 data[];
} __packed;
+struct wilc_spi_special_cmd_rsp {
+ u8 skip_byte;
+ u8 rsp_cmd_type;
+ u8 status;
+} __packed;
+
static int wilc_bus_probe(struct spi_device *spi)
{
int ret;
@@ -466,7 +474,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
}
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
- if (r->rsp_cmd_type != cmd) {
+ if (r->rsp_cmd_type != cmd && !clockless) {
if (!spi_priv->probing_crc)
dev_err(&spi->dev,
"Failed cmd, cmd (%02x), resp (%02x)\n",
@@ -474,7 +482,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
return -EINVAL;
}
- if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
r->status);
return -EINVAL;
@@ -563,14 +571,18 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data,
}
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
- if (r->rsp_cmd_type != cmd) {
+ /*
* Clockless register operations might return unexpected responses,
+ * even if successful.
+ */
+ if (r->rsp_cmd_type != cmd && !clockless) {
dev_err(&spi->dev,
"Failed cmd response, cmd (%02x), resp (%02x)\n",
cmd, r->rsp_cmd_type);
return -EINVAL;
}
- if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
r->status);
return -EINVAL;
@@ -709,6 +721,61 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
return 0;
}
+static int wilc_spi_special_cmd(struct wilc *wilc, u8 cmd)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u8 wb[32], rb[32];
+ int cmd_len, resp_len = 0;
+ struct wilc_spi_cmd *c;
+ struct wilc_spi_special_cmd_rsp *r;
+
+ if (cmd != CMD_TERMINATE && cmd != CMD_REPEAT && cmd != CMD_RESET)
+ return -EINVAL;
+
+ memset(wb, 0x0, sizeof(wb));
+ memset(rb, 0x0, sizeof(rb));
+ c = (struct wilc_spi_cmd *)wb;
+ c->cmd_type = cmd;
+
+ if (cmd == CMD_RESET)
+ memset(c->u.simple_cmd.addr, 0xFF, 3);
+
+ cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc);
+ resp_len = sizeof(*r);
+
+ if (spi_priv->crc7_enabled) {
+ c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
+ cmd_len += 1;
+ }
+ if (cmd_len + resp_len > ARRAY_SIZE(wb)) {
+ dev_err(&spi->dev, "spi buffer size too small (%d) (%d) (%zu)\n",
+ cmd_len, resp_len, ARRAY_SIZE(wb));
+ return -EINVAL;
+ }
+
+ if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
+ dev_err(&spi->dev, "Failed cmd write, bus error...\n");
+ return -EINVAL;
+ }
+
+ r = (struct wilc_spi_special_cmd_rsp *)&rb[cmd_len];
+ if (r->rsp_cmd_type != cmd) {
+ if (!spi_priv->probing_crc)
+ dev_err(&spi->dev,
+ "Failed cmd response, cmd (%02x), resp (%02x)\n",
+ cmd, r->rsp_cmd_type);
+ return -EINVAL;
+ }
+
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
+ r->status);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
@@ -895,6 +962,19 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
*
********************************************/
+static int wilc_spi_reset(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ int result;
+
+ result = wilc_spi_special_cmd(wilc, CMD_RESET);
+ if (result && !spi_priv->probing_crc)
+ dev_err(&spi->dev, "Failed cmd reset\n");
+
+ return result;
+}
+
static int wilc_spi_deinit(struct wilc *wilc)
{
/*
@@ -1087,7 +1167,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
- ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
+ ret = wilc_spi_write_reg(wilc, WILC_INTR2_ENABLE, reg);
if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_INTR2_ENABLE);
@@ -1112,4 +1192,5 @@ static const struct wilc_hif_func wilc_hif_spi = {
.hif_block_tx_ext = wilc_spi_write,
.hif_block_rx_ext = wilc_spi_read,
.hif_sync_ext = wilc_spi_sync_ext,
+ .hif_reset = wilc_spi_reset,
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 200a103a0a85..ea81ef120fd1 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -10,6 +10,8 @@
#include "cfg80211.h"
#include "wlan_cfg.h"
+#define WAKE_UP_TRIAL_RETRY 10000
+
static inline bool is_wilc1000(u32 id)
{
return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
@@ -425,6 +427,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev,
return 0;
}
+ if (!wilc->initialized) {
+ tx_complete_fn(tx_data, 0);
+ return 0;
+ }
+
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe) {
@@ -474,6 +481,10 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
return 0;
}
+ if (!wilc->initialized) {
+ tx_complete_fn(priv, 0);
+ return 0;
+ }
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe) {
@@ -611,60 +622,67 @@ EXPORT_SYMBOL_GPL(chip_allow_sleep);
void chip_wakeup(struct wilc *wilc)
{
- u32 reg, clk_status_reg;
- const struct wilc_hif_func *h = wilc->hif_func;
-
- if (wilc->io_type == WILC_HIF_SPI) {
- do {
- h->hif_read_reg(wilc, WILC_SPI_WAKEUP_REG, &reg);
- h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
- reg | WILC_SPI_WAKEUP_BIT);
- h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
- reg & ~WILC_SPI_WAKEUP_BIT);
-
- do {
- usleep_range(2000, 2500);
- wilc_get_chipid(wilc, true);
- } while (wilc_get_chipid(wilc, true) == 0);
- } while (wilc_get_chipid(wilc, true) == 0);
- } else if (wilc->io_type == WILC_HIF_SDIO) {
- h->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG,
- WILC_SDIO_HOST_TO_FW_BIT);
- usleep_range(200, 400);
- h->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg);
- do {
- h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
- reg | WILC_SDIO_WAKEUP_BIT);
- h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
- &clk_status_reg);
-
- while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
- usleep_range(2000, 2500);
+ u32 ret = 0;
+ u32 clk_status_val = 0, trials = 0;
+ u32 wakeup_reg, wakeup_bit;
+ u32 clk_status_reg, clk_status_bit;
+ u32 to_host_from_fw_reg, to_host_from_fw_bit;
+ u32 from_host_to_fw_reg, from_host_to_fw_bit;
+ const struct wilc_hif_func *hif_func = wilc->hif_func;
- h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
- &clk_status_reg);
- }
- if (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
- h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
- reg & ~WILC_SDIO_WAKEUP_BIT);
- }
- } while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT));
+ if (wilc->io_type == WILC_HIF_SDIO) {
+ wakeup_reg = WILC_SDIO_WAKEUP_REG;
+ wakeup_bit = WILC_SDIO_WAKEUP_BIT;
+ clk_status_reg = WILC_SDIO_CLK_STATUS_REG;
+ clk_status_bit = WILC_SDIO_CLK_STATUS_BIT;
+ from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT;
+ } else {
+ wakeup_reg = WILC_SPI_WAKEUP_REG;
+ wakeup_bit = WILC_SPI_WAKEUP_BIT;
+ clk_status_reg = WILC_SPI_CLK_STATUS_REG;
+ clk_status_bit = WILC_SPI_CLK_STATUS_BIT;
+ from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT;
}
- if (wilc->chip_ps_state == WILC_CHIP_SLEEPING_MANUAL) {
- if (wilc_get_chipid(wilc, false) < WILC_1000_BASE_ID_2B) {
- u32 val32;
+ /* indicate host wakeup */
+ ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg,
+ from_host_to_fw_bit);
+ if (ret)
+ return;
- h->hif_read_reg(wilc, WILC_REG_4_TO_1_RX, &val32);
- val32 |= BIT(6);
- h->hif_write_reg(wilc, WILC_REG_4_TO_1_RX, val32);
+ /* Set wake-up bit */
+ ret = hif_func->hif_write_reg(wilc, wakeup_reg,
+ wakeup_bit);
+ if (ret)
+ return;
- h->hif_read_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, &val32);
- val32 |= BIT(6);
- h->hif_write_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, val32);
+ while (trials < WAKE_UP_TRIAL_RETRY) {
+ ret = hif_func->hif_read_reg(wilc, clk_status_reg,
+ &clk_status_val);
+ if (ret) {
+ pr_err("Bus error %d %x\n", ret, clk_status_val);
+ return;
}
+ if (clk_status_val & clk_status_bit)
+ break;
+
+ trials++;
}
- wilc->chip_ps_state = WILC_CHIP_WAKEDUP;
+ if (trials >= WAKE_UP_TRIAL_RETRY) {
+ pr_err("Failed to wake-up the chip\n");
+ return;
+ }
+ /* Sometimes SPI fails to read clock regs right after reading/
+ * writing clockless registers
+ */
+ if (wilc->io_type == WILC_HIF_SPI)
+ wilc->hif_func->hif_reset(wilc);
}
EXPORT_SYMBOL_GPL(chip_wakeup);
@@ -1071,6 +1089,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 addr, size, size2, blksz;
u8 *dma_buffer;
int ret = 0;
+ u32 reg = 0;
blksz = BIT(12);
@@ -1079,10 +1098,22 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
return -EIO;
offset = 0;
+ pr_debug("%s: Downloading firmware size = %d\n", __func__, buffer_size);
+
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
+
+ wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ reg &= ~BIT(10);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
+ wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ if (reg & BIT(10))
+ pr_err("%s: Failed to reset\n", __func__);
+
+ release_bus(wilc, WILC_BUS_RELEASE_ONLY);
do {
addr = get_unaligned_le32(&buffer[offset]);
size = get_unaligned_le32(&buffer[offset + 4]);
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
offset += 8;
while (((int)size) && (offset < buffer_size)) {
if (size <= blksz)
@@ -1100,10 +1131,13 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
offset += size2;
size -= size2;
}
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- if (ret)
+ if (ret) {
+ pr_err("%s Bus error\n", __func__);
goto fail;
+ }
+ pr_debug("%s Offset = %d\n", __func__, offset);
} while (offset < buffer_size);
fail:
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index 771c25fa849b..13fde636aa0e 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -97,6 +97,8 @@
#define WILC_SPI_WAKEUP_REG 0x1
#define WILC_SPI_WAKEUP_BIT BIT(1)
+#define WILC_SPI_CLK_STATUS_REG 0x0f
+#define WILC_SPI_CLK_STATUS_BIT BIT(2)
#define WILC_SPI_HOST_TO_FW_REG 0x0b
#define WILC_SPI_HOST_TO_FW_BIT BIT(0)
@@ -300,7 +302,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
/* time for expiring the completion of cfg packets */
-#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000)
+#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(3000)
#define IS_MANAGMEMENT 0x100
#define IS_MANAGMEMENT_CALLBACK 0x080
@@ -371,6 +373,7 @@ struct wilc_hif_func {
int (*hif_sync_ext)(struct wilc *wilc, int nint);
int (*enable_interrupt)(struct wilc *nic);
void (*disable_interrupt)(struct wilc *nic);
+ int (*hif_reset)(struct wilc *wilc);
};
#define WILC_MAX_CFG_FRAME_SIZE 1468
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index fe2a7ed8e5cd..dba301378b7f 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = {
{WID_STATUS, 0},
{WID_RSSI, 0},
{WID_LINKSPEED, 0},
+ {WID_WOWLAN_TRIGGER, 0},
{WID_NIL, 0}
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_if.h b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
index f85fd575136d..6eb7eb4ac294 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_if.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
@@ -48,12 +48,6 @@ enum {
WILC_FW_MAX_PSPOLL_PS = 4
};
-enum chip_ps_states {
- WILC_CHIP_WAKEDUP = 0,
- WILC_CHIP_SLEEPING_AUTO = 1,
- WILC_CHIP_SLEEPING_MANUAL = 2
-};
-
enum bus_acquire {
WILC_BUS_ACQUIRE_ONLY = 0,
WILC_BUS_ACQUIRE_AND_WAKEUP = 1,
@@ -662,6 +656,7 @@ enum {
WID_LOG_TERMINAL_SWITCH = 0x00CD,
WID_TX_POWER = 0x00CE,
+ WID_WOWLAN_TRIGGER = 0x00CF,
/* EMAC Short WID list */
/* RTS Threshold */
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index b5c67f656cfd..a3ffd1b0c9bc 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -1101,7 +1101,6 @@ static const struct usb_device_id rt2800usb_device_table[] = {
#ifdef CONFIG_RT2800USB_RT53XX
/* Arcadyan */
{ USB_DEVICE(0x043e, 0x7a12) },
- { USB_DEVICE(0x043e, 0x7a32) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x17e8) },
/* Azurewave */
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 774341b0005a..a42e2081b75f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4460,13 +4460,17 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
{
+ struct ieee80211_hw *hw = priv->hw;
u32 val32;
u8 rate_idx = 0;
rate_cfg &= RESPONSE_RATE_BITMAP_ALL;
val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
- val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+ if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
+ val32 &= RESPONSE_RATE_RRSR_INIT_5G;
+ else
+ val32 &= RESPONSE_RATE_RRSR_INIT_2G;
val32 |= rate_cfg;
rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index a2a31f374a82..438b65ba9640 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -516,6 +516,8 @@
#define REG_RESPONSE_RATE_SET 0x0440
#define RESPONSE_RATE_BITMAP_ALL 0xfffff
#define RESPONSE_RATE_RRSR_CCK_ONLY_1M 0xffff1
+#define RESPONSE_RATE_RRSR_INIT_2G 0x15f
+#define RESPONSE_RATE_RRSR_INIT_5G 0x150
#define RSR_1M BIT(0)
#define RSR_2M BIT(1)
#define RSR_5_5M BIT(2)
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index dfd52cff5d02..682b23502e6e 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -12,6 +12,7 @@
#include "phy.h"
#include "reg.h"
#include "ps.h"
+#include "regd.h"
#ifdef CONFIG_RTW88_DEBUGFS
@@ -587,7 +588,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
struct rtw_power_params pwr_param = {0};
u8 bw = hal->current_band_width;
u8 ch = hal->current_channel;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd));
seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n",
@@ -828,6 +829,38 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v)
return 0;
}
+static ssize_t rtw_debugfs_set_edcca_enable(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ bool input;
+ int err;
+
+ err = kstrtobool_from_user(buffer, count, &input);
+ if (err)
+ return err;
+
+ rtw_edcca_enabled = input;
+ rtw_phy_adaptivity_set_mode(rtwdev);
+
+ return count;
+}
+
+static int rtw_debugfs_get_edcca_enable(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ seq_printf(m, "EDCCA %s: EDCCA mode %d\n",
+ rtw_edcca_enabled ? "enabled" : "disabled",
+ dm_info->edcca_mode);
+ return 0;
+}
+
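/* Illustrative note (not part of the patch): once registered via
 * rtw_debugfs_add_rw() below, the new entry can be driven from
 * userspace; assuming the usual rtw88 debugfs location under the
 * wiphy directory:
 *
 *   echo 0 > /sys/kernel/debug/ieee80211/phy0/rtw88/edcca_enable
 *   cat /sys/kernel/debug/ieee80211/phy0/rtw88/edcca_enable
 */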
static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
@@ -853,6 +886,7 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
+ set_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags);
rtw_write8(rtwdev, REG_HRCV_MSG, 1);
mutex_unlock(&rtwdev->mutex);
@@ -864,7 +898,9 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v)
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTARTING, rtwdev->flags));
+ seq_printf(m, "%d\n",
+ test_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags) ||
+ test_bit(RTW_FLAG_RESTARTING, rtwdev->flags));
return 0;
}
@@ -1048,6 +1084,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
.cb_read = rtw_debugfs_get_coex_info,
};
+static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = {
+ .cb_write = rtw_debugfs_set_edcca_enable,
+ .cb_read = rtw_debugfs_get_edcca_enable,
+};
+
static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
.cb_write = rtw_debugfs_set_fw_crash,
.cb_read = rtw_debugfs_get_fw_crash,
@@ -1131,6 +1172,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
}
rtw_debugfs_add_r(rf_dump);
rtw_debugfs_add_r(tx_pwr_tbl);
+ rtw_debugfs_add_rw(edcca_enable);
rtw_debugfs_add_rw(fw_crash);
rtw_debugfs_add_rw(dm_cap);
}
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 0dd3f9a88c8d..47c57f395f52 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -21,6 +21,7 @@ enum rtw_debug_mask {
RTW_DBG_WOW = 0x00001000,
RTW_DBG_CFO = 0x00002000,
RTW_DBG_PATH_DIV = 0x00004000,
+ RTW_DBG_ADAPTIVITY = 0x00008000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index e6399519584b..0c4f2a2f2d7f 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -183,6 +183,28 @@ static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
dm_info->scan_density);
}
+static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
+ struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
+ "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
+ result->density, result->igi, result->l2h_th_init, result->l2h,
+ result->h2l, result->option);
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
+ rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
+ rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
+ rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
+ "Set" : "Unset");
+}
+
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
@@ -252,6 +274,10 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
rtw_fw_scan_result(rtwdev, c2h->payload, len);
dev_kfree_skb_any(skb);
break;
+ case C2H_ADAPTIVITY:
+ rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
+ dev_kfree_skb_any(skb);
+ break;
default:
/* pass offset for further operation */
*((u32 *)skb->cb) = pkt_offset;
@@ -1556,12 +1582,10 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
u32 i;
u16 idx = 0;
u16 ctl;
- u8 rcr;
- rcr = rtw_read8(rtwdev, REG_RCR + 2);
ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
/* disable rx clock gate */
- rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
+ rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);
do {
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
@@ -1580,7 +1604,8 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
out:
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
- rtw_write8(rtwdev, REG_RCR + 2, rcr);
+ /* restore rx clock gate */
+ rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
@@ -1722,6 +1747,27 @@ void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
+void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ if (!rtw_edcca_enabled) {
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
+ "EDCCA disabled by debugfs\n");
+ }
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
+ SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
+ SET_ADAPTIVITY_OPTION(h2c_pkt, 2);
+ SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
+ SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
+ SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 64dcde35a021..09c7afb99e63 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -41,6 +41,7 @@ enum rtw_c2h_cmd_id {
C2H_WLAN_INFO = 0x27,
C2H_WLAN_RFON = 0x32,
C2H_BCN_FILTER_NOTIFY = 0x36,
+ C2H_ADAPTIVITY = 0x37,
C2H_SCAN_RESULT = 0x38,
C2H_HW_FEATURE_DUMP = 0xfd,
C2H_HALMAC = 0xff,
@@ -56,6 +57,15 @@ struct rtw_c2h_cmd {
u8 payload[];
} __packed;
+struct rtw_c2h_adaptivity {
+ u8 density;
+ u8 igi;
+ u8 l2h_th_init;
+ u8 l2h;
+ u8 h2l;
+ u8 option;
+} __packed;
+
enum rtw_rsvd_packet_type {
RSVD_BEACON,
RSVD_DUMMY,
@@ -90,6 +100,7 @@ enum rtw_fw_feature {
FW_FEATURE_PG = BIT(3),
FW_FEATURE_BCN_FILTER = BIT(5),
FW_FEATURE_NOTIFY_SCAN = BIT(6),
+ FW_FEATURE_ADAPTIVITY = BIT(7),
FW_FEATURE_MAX = BIT(31),
};
@@ -375,6 +386,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57
#define H2C_CMD_WL_PHY_INFO 0x58
#define H2C_CMD_SCAN 0x59
+#define H2C_CMD_ADAPTIVITY 0x5A
#define H2C_CMD_COEX_TDMA_TYPE 0x60
#define H2C_CMD_QUERY_BT_INFO 0x61
@@ -428,6 +440,17 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_SCAN_START(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_ADAPTIVITY_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(11, 8))
+#define SET_ADAPTIVITY_OPTION(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12))
+#define SET_ADAPTIVITY_IGI(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_ADAPTIVITY_L2H(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_ADAPTIVITY_DENSITY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+
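/* Illustrative sketch (not part of the patch): each SET_ADAPTIVITY_*
 * macro above patches one bit-field of a little-endian 32-bit word in
 * the H2C buffer. A self-contained demonstration of the underlying
 * primitive from <linux/bitfield.h> (values are arbitrary):
 */
#include <linux/bitfield.h>

static void adaptivity_pack_demo(void)
{
        u8 h2c_pkt[8] = {0};

        /* word 0, bits 11:8 <- mode; what SET_ADAPTIVITY_MODE() does */
        le32p_replace_bits((__le32 *)h2c_pkt + 0x00, 0x1, GENMASK(11, 8));
        /* word 1, bits 7:0 <- density, i.e. byte 4 of the buffer */
        le32p_replace_bits((__le32 *)h2c_pkt + 0x01, 0xab, GENMASK(7, 0));
        /* now h2c_pkt[1] == 0x01 and h2c_pkt[4] == 0xab */
}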
#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \
@@ -662,4 +685,5 @@ void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
+void rtw_fw_adaptivity(struct rtw_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6bb55e663fc3..a0d4d6e31fb4 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -23,6 +23,14 @@ EXPORT_SYMBOL(rtw_disable_lps_deep_mode);
bool rtw_bf_support = true;
unsigned int rtw_debug_mask;
EXPORT_SYMBOL(rtw_debug_mask);
+/* EDCCA is enabled during normal operation. For debugging in a noisy
+ * environment, it can be disabled via the edcca debugfs entry. Since a
+ * noisy environment will likely affect every rtw88 device in it,
+ * rtw_edcca_enabled is declared per driver rather than per device, so
+ * turning it off takes effect for all rtw88 devices, until there is a
+ * compelling reason to track the flag per device.
+ */
+bool rtw_edcca_enabled = true;
module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644);
module_param_named(support_bf, rtw_bf_support, bool, 0644);
@@ -556,6 +564,7 @@ static void __fw_recovery_work(struct rtw_dev *rtwdev)
int ret = 0;
set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags);
ret = rtw_fwcd_prep(rtwdev);
if (ret)
@@ -1964,7 +1973,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
- rtw_regd_init(rtwdev, rtw_regd_notifier);
+ ret = rtw_regd_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to init regd\n");
+ return ret;
+ }
ret = ieee80211_register_hw(hw);
if (ret) {
@@ -1972,8 +1985,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
return ret;
}
- if (regulatory_hint(hw->wiphy, rtwdev->regd.alpha2))
- rtw_err(rtwdev, "regulatory_hint fail\n");
+ ret = rtw_regd_hint(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to hint regd\n");
+ return ret;
+ }
rtw_debugfs_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 56812127a053..bbdd535b64e7 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -41,6 +41,7 @@
extern bool rtw_bf_support;
extern bool rtw_disable_lps_deep_mode;
extern unsigned int rtw_debug_mask;
+extern bool rtw_edcca_enabled;
extern const struct ieee80211_ops rtw_ops;
#define RTW_MAX_CHANNEL_NUM_2G 14
@@ -362,6 +363,7 @@ enum rtw_flags {
RTW_FLAG_BUSY_TRAFFIC,
RTW_FLAG_WOWLAN,
RTW_FLAG_RESTARTING,
+ RTW_FLAG_RESTART_TRIGGERING,
NUM_OF_RTW_FLAGS,
};
@@ -545,6 +547,11 @@ struct rtw_rf_sipi_addr {
u32 lssi_read_pi;
};
+struct rtw_hw_reg_offset {
+ struct rtw_hw_reg hw_reg;
+ u8 offset;
+};
+
struct rtw_backup_info {
u8 len;
u32 reg;
@@ -800,8 +807,22 @@ struct rtw_vif {
struct rtw_regulatory {
char alpha2[2];
- u8 chplan;
- u8 txpwr_regd;
+ u8 txpwr_regd_2g;
+ u8 txpwr_regd_5g;
+};
+
+enum rtw_regd_state {
+ RTW_REGD_STATE_WORLDWIDE,
+ RTW_REGD_STATE_PROGRAMMED,
+ RTW_REGD_STATE_SETTING,
+
+ RTW_REGD_STATE_NR,
+};
+
+struct rtw_regd {
+ enum rtw_regd_state state;
+ const struct rtw_regulatory *regulatory;
+ enum nl80211_dfs_regions dfs_region;
};
struct rtw_chip_ops {
@@ -839,6 +860,8 @@ struct rtw_chip_ops {
struct ieee80211_bss_conf *conf);
void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
u8 fixrate_en, u8 *new_rate);
+ void (*adaptivity_init)(struct rtw_dev *rtwdev);
+ void (*adaptivity)(struct rtw_dev *rtwdev);
void (*cfo_init)(struct rtw_dev *rtwdev);
void (*cfo_track)(struct rtw_dev *rtwdev);
void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path,
@@ -1194,6 +1217,10 @@ struct rtw_chip_info {
u8 bfer_su_max_num;
u8 bfer_mu_max_num;
+ struct rtw_hw_reg_offset *edcca_th;
+ s8 l2h_th_ini_cs;
+ s8 l2h_th_ini_ad;
+
const char *wow_fw_name;
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
@@ -1542,6 +1569,20 @@ struct rtw_gapk_info {
u8 channel;
};
+#define EDCCA_TH_L2H_IDX 0
+#define EDCCA_TH_H2L_IDX 1
+#define EDCCA_TH_L2H_LB 48
+#define EDCCA_ADC_BACKOFF 12
+#define EDCCA_IGI_BASE 50
+#define EDCCA_IGI_L2H_DIFF 8
+#define EDCCA_L2H_H2L_DIFF 7
+#define EDCCA_L2H_H2L_DIFF_NORMAL 8
+
+enum rtw_edcca_mode {
+ RTW_EDCCA_NORMAL = 0,
+ RTW_EDCCA_ADAPTIVITY = 1,
+};
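/* Illustrative sketch (editor's assumption based on the constant names
 * above, not taken from the patch): the H2L threshold appears to trail
 * L2H by a fixed hysteresis whose width depends on the EDCCA mode:
 */
static s8 edcca_h2l_from_l2h(s8 l2h, enum rtw_edcca_mode mode)
{
        return mode == RTW_EDCCA_ADAPTIVITY ?
               l2h - EDCCA_L2H_H2L_DIFF :        /* 7 in adaptivity mode */
               l2h - EDCCA_L2H_H2L_DIFF_NORMAL;  /* 8 otherwise */
}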
+
struct rtw_cfo_track {
bool is_adjust;
u8 crystal_cap;
@@ -1633,6 +1674,8 @@ struct rtw_dm_info {
struct rtw_gapk_info gapk;
bool is_bt_iqk_timeout;
+ s8 l2h_th_ini;
+ enum rtw_edcca_mode edcca_mode;
u8 scan_density;
};
@@ -1833,7 +1876,7 @@ struct rtw_dev {
struct rtw_efuse efuse;
struct rtw_sec_desc sec;
struct rtw_traffic_stats stats;
- struct rtw_regulatory regd;
+ struct rtw_regd regd;
struct rtw_bf_info bf_info;
struct rtw_dm_info dm_info;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 569dd3cfde35..bfddfcbe63f5 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "phy.h"
#include "debug.h"
+#include "regd.h"
struct phy_cfg_pair {
u32 addr;
@@ -119,6 +120,63 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
}
+void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l)
+{
+ struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
+
+ rtw_write32_mask(rtwdev,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask,
+ l2h + edcca_th[EDCCA_TH_L2H_IDX].offset);
+ rtw_write32_mask(rtwdev,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask,
+ h2l + edcca_th[EDCCA_TH_H2L_IDX].offset);
+}
+EXPORT_SYMBOL(rtw_phy_set_edcca_th);
+
+void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ /* may be turned off via debugfs for debugging */
+ if (!rtw_edcca_enabled) {
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "EDCCA disabled, cannot be set\n");
+ return;
+ }
+
+ switch (rtwdev->regd.dfs_region) {
+ case NL80211_DFS_ETSI:
+ dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
+ dm_info->l2h_th_ini = chip->l2h_th_ini_ad;
+ break;
+ case NL80211_DFS_JP:
+ dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
+ dm_info->l2h_th_ini = chip->l2h_th_ini_cs;
+ break;
+ default:
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ break;
+ }
+}
+
+static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ rtw_phy_adaptivity_set_mode(rtwdev);
+ if (chip->ops->adaptivity_init)
+ chip->ops->adaptivity_init(rtwdev);
+}
+
+static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->adaptivity)
+ rtwdev->chip->ops->adaptivity(rtwdev);
+}
+
static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -159,6 +217,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
rtw_phy_cck_pd_init(rtwdev);
dm_info->iqk.done = false;
+ rtw_phy_adaptivity_init(rtwdev);
rtw_phy_cfo_init(rtwdev);
rtw_phy_tx_path_div_init(rtwdev);
}
@@ -711,6 +770,11 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_cfo_track(rtwdev);
rtw_phy_dpk_track(rtwdev);
rtw_phy_pwr_track(rtwdev);
+
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_ADAPTIVITY))
+ rtw_fw_adaptivity(rtwdev);
+ else
+ rtw_phy_adaptivity(rtwdev);
}
#define FRAC_BITS 3
@@ -1564,17 +1628,70 @@ static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}
+static void
+__cfg_txpwr_lmt_by_alt(struct rtw_hal *hal, u8 regd, u8 regd_alt, u8 bw, u8 rs)
+{
+ u8 ch;
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] =
+ hal->tx_pwr_limit_2g[regd_alt][bw][rs][ch];
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] =
+ hal->tx_pwr_limit_5g[regd_alt][bw][rs][ch];
+}
+
+static void
+rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt)
+{
+ u8 bw, rs;
+
+ for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ __cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt,
+ bw, rs);
+}
+
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
+ u32 regd_cfg_flag = 0;
+ u8 regd_alt;
+ u8 i;
for (; p < end; p++) {
+ regd_cfg_flag |= BIT(p->regd);
rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
p->bw, p->rs, p->ch, p->txpwr_lmt);
}
+ for (i = 0; i < RTW_REGD_MAX; i++) {
+ if (i == RTW_REGD_WW)
+ continue;
+
+ if (regd_cfg_flag & BIT(i))
+ continue;
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "txpwr regd %d does not be configured\n", i);
+
+ if (rtw_regd_has_alt(i, &regd_alt) &&
+ regd_cfg_flag & BIT(regd_alt)) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "cfg txpwr regd %d by regd %d as alternative\n",
+ i, regd_alt);
+
+ rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, regd_alt);
+ continue;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "cfg txpwr regd %d by WW\n", i);
+ rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, RTW_REGD_WW);
+ }
+
rtw_xref_txpwr_lmt(rtwdev);
}
EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt);
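/* Illustrative note (not part of the patch): net effect of the loop
 * above is that every regulatory domain ends up with a complete limit
 * table. Domains present in the vendor table keep their own limits;
 * absent ones are copied from their alternative domain when
 * rtw_regd_has_alt() provides one, and from the worldwide (WW) limits
 * otherwise.
 */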
@@ -2014,7 +2131,7 @@ static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
u8 ch, u8 path, u8 rs)
{
struct rtw_hal *hal = &rtwdev->hal;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 *rates;
u8 size;
u8 rate;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index 112ed125970a..02d1ec47ffb1 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -59,6 +59,8 @@ bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
struct rtw_swing_table *swing_table);
+void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l);
+void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev);
void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat);
void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index f5ce75095e90..84ba9ec489c3 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -361,10 +361,12 @@
#define REG_AGGR_BREAK_TIME 0x051A
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
+#define BIT_DIS_EDCCA BIT(15)
#define BIT_SIFS_BK_EN BIT(12)
#define REG_TXPAUSE 0x0522
#define BIT_AC_QUEUE GENMASK(7, 0)
#define REG_RD_CTRL 0x0524
+#define BIT_EDCCA_MSK_CNTDOWN_EN BIT(11)
#define BIT_DIS_TXOP_CFE BIT(10)
#define BIT_DIS_LSIG_CFE BIT(9)
#define BIT_DIS_STBC_CFE BIT(8)
@@ -406,6 +408,7 @@
#define BIT_MFBEN BIT(22)
#define BIT_DISCHKPPDLLEN BIT(21)
#define BIT_PKTCTL_DLEN BIT(20)
+#define BIT_DISGCLK BIT(19)
#define BIT_TIM_PARSER_EN BIT(18)
#define BIT_BC_MD_EN BIT(17)
#define BIT_UC_MD_EN BIT(16)
@@ -640,6 +643,9 @@
#define REG_HRCV_MSG 0x1cf
+#define REG_EDCCA_REPORT 0x2d38
+#define BIT_EDCCA_FLAG BIT(24)
+
#define REG_IGN_GNTBT4 0x4160
#define RF_MODE 0x00
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
index 69744dd65968..315c2b193e92 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.c
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -7,288 +7,274 @@
#include "debug.h"
#include "phy.h"
-#define COUNTRY_CHPLAN_ENT(_alpha2, _chplan, _txpwr_regd) \
+#define COUNTRY_REGD_ENT(_alpha2, _regd_2g, _regd_5g) \
{.alpha2 = (_alpha2), \
- .chplan = (_chplan), \
- .txpwr_regd = (_txpwr_regd) \
+ .txpwr_regd_2g = (_regd_2g), \
+ .txpwr_regd_5g = (_regd_5g), \
}
+#define rtw_dbg_regd_dump(_dev, _msg, _args...) \
+do { \
+ struct rtw_dev *__d = (_dev); \
+ const struct rtw_regd *__r = &__d->regd; \
+ rtw_dbg(__d, RTW_DBG_REGD, _msg \
+ "apply alpha2 %c%c, regd {%d, %d}, dfs_region %d\n",\
+ ##_args, \
+ __r->regulatory->alpha2[0], \
+ __r->regulatory->alpha2[1], \
+ __r->regulatory->txpwr_regd_2g, \
+ __r->regulatory->txpwr_regd_5g, \
+ __r->dfs_region); \
+} while (0)
+
/* If country code is not correctly defined in efuse,
* use worldwide country code and txpwr regd.
*/
-static const struct rtw_regulatory rtw_defined_chplan =
- COUNTRY_CHPLAN_ENT("00", RTW_CHPLAN_REALTEK_DEFINE, RTW_REGD_WW);
-
-static const struct rtw_regulatory all_chplan_map[] = {
- COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BB", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_IC),
- COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_CHILE),
- COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("DO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("DZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("EE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EG", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ER", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ES", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ET", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FJ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("FK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("FO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GP", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GT", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ID", RTW_CHPLAN_ETSI1_ETSI12, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK),
- COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC3, RTW_REGD_KCC),
- COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("LI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MA", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ME", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MF", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MH", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI15, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SR", RTW_CHPLAN_FCC2_FCC17, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("ST", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SV", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SX", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("TV", RTW_CHPLAN_ETSI1_NULL, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("UG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("US", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("UY", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("UZ", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+static const struct rtw_regulatory rtw_reg_ww =
+ COUNTRY_REGD_ENT("00", RTW_REGD_WW, RTW_REGD_WW);
+
+static const struct rtw_regulatory rtw_reg_map[] = {
+ COUNTRY_REGD_ENT("AD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AG", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AR", RTW_REGD_MEXICO, RTW_REGD_MEXICO),
+ COUNTRY_REGD_ENT("AS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AU", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("AW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BB", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BZ", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CA", RTW_REGD_IC, RTW_REGD_IC),
+ COUNTRY_REGD_ENT("CC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CL", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CX", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("CY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("DO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("DZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("EE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ER", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ES", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ET", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FJ", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("FK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("FO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GD", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GP", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GU", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("HM", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("HN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("HT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ID", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("JO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JP", RTW_REGD_MKK, RTW_REGD_MKK),
+ COUNTRY_REGD_ENT("KE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("KR", RTW_REGD_KCC, RTW_REGD_KCC),
+ COUNTRY_REGD_ENT("KW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("KZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("LI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ME", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MF", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MH", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ML", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MP", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MX", RTW_REGD_MEXICO, RTW_REGD_MEXICO),
+ COUNTRY_REGD_ENT("MY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NF", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("NG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NI", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("NL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NP", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NU", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("NZ", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("OM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PA", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PE", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("QA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("ST", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SV", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SX", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TK", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("TM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("TV", RTW_REGD_ETSI, RTW_REGD_WW),
+ COUNTRY_REGD_ENT("TW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("TZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("UA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("UG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("US", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("UY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("UZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VE", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VG", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VI", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("WF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("WS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("XK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("YE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("YT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZW", RTW_REGD_ETSI, RTW_REGD_ETSI),
};
-static void rtw_regd_apply_beaconing_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
-{
- enum nl80211_band band;
- struct ieee80211_supported_band *sband;
- const struct ieee80211_reg_rule *reg_rule;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
-
- sband = wiphy->bands[band];
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
-
- reg_rule = freq_reg_info(wiphy,
- MHZ_TO_KHZ(ch->center_freq));
- if (IS_ERR(reg_rule))
- continue;
-
- ch->flags &= ~IEEE80211_CHAN_DISABLED;
-
- if (!(reg_rule->flags & NL80211_RRF_NO_IR))
- ch->flags &= ~IEEE80211_CHAN_NO_IR;
- }
- }
-}
-
static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
@@ -321,78 +307,223 @@ out_5g:
}
}
-static void rtw_regd_apply_world_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static bool rtw_reg_is_ww(const struct rtw_regulatory *reg)
{
- rtw_regd_apply_beaconing_flags(wiphy, initiator);
+ return reg == &rtw_reg_ww;
}
-static struct rtw_regulatory rtw_regd_find_reg_by_name(char *alpha2)
+static bool rtw_reg_match(const struct rtw_regulatory *reg, const char *alpha2)
+{
+ return memcmp(reg->alpha2, alpha2, 2) == 0;
+}
+
+static const struct rtw_regulatory *rtw_reg_find_by_name(const char *alpha2)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(all_chplan_map); i++) {
- if (!memcmp(all_chplan_map[i].alpha2, alpha2, 2))
- return all_chplan_map[i];
+ for (i = 0; i < ARRAY_SIZE(rtw_reg_map); i++) {
+ if (rtw_reg_match(&rtw_reg_map[i], alpha2))
+ return &rtw_reg_map[i];
}
- return rtw_defined_chplan;
+ return &rtw_reg_ww;
}
-static int rtw_regd_notifier_apply(struct rtw_dev *rtwdev,
- struct wiphy *wiphy,
- struct regulatory_request *request)
+static
+void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+
+/* call this before ieee80211_register_hw() */
+int rtw_regd_init(struct rtw_dev *rtwdev)
{
- if (request->initiator == NL80211_REGDOM_SET_BY_USER)
- return 0;
- rtwdev->regd = rtw_regd_find_reg_by_name(request->alpha2);
- rtw_regd_apply_world_flags(wiphy, request->initiator);
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+ const struct rtw_regulatory *chip_reg;
- return 0;
-}
+ if (!wiphy)
+ return -EINVAL;
-static int
-rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
-{
- wiphy->reg_notifier = reg_notifier;
+ wiphy->reg_notifier = rtw_regd_notifier;
- wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG;
- wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
- wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
+ chip_reg = rtw_reg_find_by_name(rtwdev->efuse.country_code);
+ if (!rtw_reg_is_ww(chip_reg)) {
+ rtwdev->regd.state = RTW_REGD_STATE_PROGRAMMED;
- rtw_regd_apply_hw_cap_flags(wiphy);
+ /* Set REGULATORY_STRICT_REG before ieee80211_register_hw(), so
+ * the stack will wait for regulatory_hint() and treat it as the
+ * superset of our regulatory rules.
+ */
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ } else {
+ rtwdev->regd.state = RTW_REGD_STATE_WORLDWIDE;
+ }
+ rtwdev->regd.regulatory = &rtw_reg_ww;
+ rtwdev->regd.dfs_region = NL80211_DFS_UNSET;
+ rtw_dbg_regd_dump(rtwdev, "regd init state %d: ", rtwdev->regd.state);
+
+ rtw_regd_apply_hw_cap_flags(wiphy);
return 0;
}
-int rtw_regd_init(struct rtw_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+/* call this after ieee80211_register_hw() */
+int rtw_regd_hint(struct rtw_dev *rtwdev)
{
struct wiphy *wiphy = rtwdev->hw->wiphy;
+ int ret;
if (!wiphy)
return -EINVAL;
- rtwdev->regd = rtw_regd_find_reg_by_name(rtwdev->efuse.country_code);
- rtw_regd_init_wiphy(&rtwdev->regd, wiphy, reg_notifier);
+ if (rtwdev->regd.state == RTW_REGD_STATE_PROGRAMMED) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "country domain %c%c is PGed on efuse",
+ rtwdev->efuse.country_code[0],
+ rtwdev->efuse.country_code[1]);
+
+ ret = regulatory_hint(wiphy, rtwdev->efuse.country_code);
+ if (ret) {
+ rtw_warn(rtwdev,
+ "failed to hint regulatory: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
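The split regd API above imposes an ordering constraint on the probe path: rtw_regd_init() must run before ieee80211_register_hw() so that REGULATORY_STRICT_REG is set in time, while rtw_regd_hint() must run after registration. A minimal caller sketch under those assumptions (example_register_hw is a hypothetical name, not from this patch):

	static int example_register_hw(struct rtw_dev *rtwdev)
	{
		int ret;

		ret = rtw_regd_init(rtwdev);	/* before ieee80211_register_hw() */
		if (ret)
			return ret;

		ret = ieee80211_register_hw(rtwdev->hw);
		if (ret)
			return ret;

		return rtw_regd_hint(rtwdev);	/* hint efuse country, if programmed */
	}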
+static bool rtw_regd_mgmt_worldwide(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ next_regd->state = RTW_REGD_STATE_WORLDWIDE;
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ !rtw_reg_is_ww(next_regd->regulatory)) {
+ next_regd->state = RTW_REGD_STATE_SETTING;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
+ return true;
+}
+
+static bool rtw_regd_mgmt_programmed(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ rtw_reg_match(next_regd->regulatory, rtwdev->efuse.country_code)) {
+ next_regd->state = RTW_REGD_STATE_PROGRAMMED;
+ return true;
+ }
+
+ return false;
+}
+
+static bool rtw_regd_mgmt_setting(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ if (request->initiator != NL80211_REGDOM_SET_BY_USER)
+ return false;
+
+ next_regd->state = RTW_REGD_STATE_SETTING;
+
+ if (rtw_reg_is_ww(next_regd->regulatory)) {
+ next_regd->state = RTW_REGD_STATE_WORLDWIDE;
+ wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
+ return true;
+}
+
+static bool (*const rtw_regd_handler[RTW_REGD_STATE_NR])
+ (struct rtw_dev *, struct rtw_regd *, struct regulatory_request *) = {
+ [RTW_REGD_STATE_WORLDWIDE] = rtw_regd_mgmt_worldwide,
+ [RTW_REGD_STATE_PROGRAMMED] = rtw_regd_mgmt_programmed,
+ [RTW_REGD_STATE_SETTING] = rtw_regd_mgmt_setting,
+};
+
+static bool rtw_regd_state_hdl(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ next_regd->regulatory = rtw_reg_find_by_name(request->alpha2);
+ next_regd->dfs_region = request->dfs_region;
+ return rtw_regd_handler[rtwdev->regd.state](rtwdev, next_regd, request);
+}
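The three handlers dispatched here encode a small state machine. Read together, the transitions they implement are (derived from the handlers above):

	WORLDWIDE  + user request for non-WW domain  -> SETTING
	WORLDWIDE  + any other request               -> WORLDWIDE
	PROGRAMMED + driver re-hint of efuse country -> PROGRAMMED (all else ignored)
	SETTING    + user request for WW             -> WORLDWIDE
	SETTING    + user request for a country      -> SETTING (non-user requests ignored)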
+
+static
void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtw_dev *rtwdev = hw->priv;
struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_regd next_regd = {0};
+ bool hdl;
+
+ hdl = rtw_regd_state_hdl(rtwdev, &next_regd, request);
+ if (!hdl) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "regd state %d: ignore request %c%c of initiator %d\n",
+ rtwdev->regd.state,
+ request->alpha2[0],
+ request->alpha2[1],
+ request->initiator);
+ return;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "regd state: %d -> %d\n",
+ rtwdev->regd.state, next_regd.state);
- rtw_regd_notifier_apply(rtwdev, wiphy, request);
- rtw_dbg(rtwdev, RTW_DBG_REGD,
- "get alpha2 %c%c from initiator %d, mapping to chplan 0x%x, txregd %d\n",
- request->alpha2[0], request->alpha2[1], request->initiator,
- rtwdev->regd.chplan, rtwdev->regd.txpwr_regd);
+ rtwdev->regd = next_regd;
+ rtw_dbg_regd_dump(rtwdev, "get alpha2 %c%c from initiator %d: ",
+ request->alpha2[0],
+ request->alpha2[1],
+ request->initiator);
+ rtw_phy_adaptivity_set_mode(rtwdev);
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
}
+
+u8 rtw_regd_get(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 band = hal->current_band_type;
+
+ return band == RTW_BAND_2G ?
+ rtwdev->regd.regulatory->txpwr_regd_2g :
+ rtwdev->regd.regulatory->txpwr_regd_5g;
+}
+EXPORT_SYMBOL(rtw_regd_get);
+
+struct rtw_regd_alternative_t {
+ bool set;
+ u8 alt;
+};
+
+#define DECL_REGD_ALT(_regd, _regd_alt) \
+ [(_regd)] = {.set = true, .alt = (_regd_alt)}
+
+static const struct rtw_regd_alternative_t
+rtw_regd_alt[RTW_REGD_MAX] = {
+ DECL_REGD_ALT(RTW_REGD_IC, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_KCC, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_ACMA, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_CHILE, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_UKRAINE, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_MEXICO, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_CN, RTW_REGD_ETSI),
+};
+
+bool rtw_regd_has_alt(u8 regd, u8 *regd_alt)
+{
+ if (!rtw_regd_alt[regd].set)
+ return false;
+
+ *regd_alt = rtw_regd_alt[regd].alt;
+ return true;
+}
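rtw_regd_has_alt() lets chip code fall back to a TX power table it actually has when the mapped domain is one of the seven aliased entries above. A hypothetical caller sketch (chip_has_txpwr_table is an illustrative helper, not part of this patch):

	u8 regd = rtw_regd_get(rtwdev);
	u8 regd_alt;

	if (!chip_has_txpwr_table(rtwdev, regd) &&	/* hypothetical helper */
	    rtw_regd_has_alt(regd, &regd_alt))
		regd = regd_alt;	/* e.g. RTW_REGD_KCC falls back to RTW_REGD_ETSI */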
diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h
index 5d4578331788..34cb13d0cd9e 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.h
+++ b/drivers/net/wireless/realtek/rtw88/regd.h
@@ -64,8 +64,8 @@ enum country_code_type {
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct rtw_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
-void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+int rtw_regd_init(struct rtw_dev *rtwdev);
+int rtw_regd_hint(struct rtw_dev *rtwdev);
+u8 rtw_regd_get(struct rtw_dev *rtwdev);
+bool rtw_regd_has_alt(u8 regd, u8 *regd_alt);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 785b8181513f..80a6f4da6acd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -14,6 +14,7 @@
#include "reg.h"
#include "debug.h"
#include "bf.h"
+#include "regd.h"
static const s8 lna_gain_table_0[8] = {22, 8, -6, -22, -31, -40, -46, -52};
static const s8 lna_gain_table_1[16] = {10, 6, 2, -2, -6, -10, -14, -17,
@@ -60,6 +61,9 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+ if (rtwdev->efuse.rfe_option == 2 || rtwdev->efuse.rfe_option == 4)
+ efuse->txpwr_idx_table[0].pwr_idx_2g = map->txpwr_idx_table[1].pwr_idx_2g;
+
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw8821ce_efuse_parsing(efuse, map);
@@ -304,7 +308,8 @@ static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
if (channel <= 14) {
if (rtwdev->efuse.rfe_option == 0)
rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_WLG);
- else if (rtwdev->efuse.rfe_option == 2)
+ else if (rtwdev->efuse.rfe_option == 2 ||
+ rtwdev->efuse.rfe_option == 4)
rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_BTG);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x1);
rtw_write_rf(rtwdev, RF_PATH_A, 0x64, 0xf, 0xf);
@@ -773,6 +778,15 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
if (switch_status == coex_dm->cur_switch_status)
return;
+ if (coex_rfe->wlg_at_btg) {
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+
+ if (coex_rfe->ant_switch_polarity)
+ pos_type = COEX_SWITCH_TO_WLA;
+ else
+ pos_type = COEX_SWITCH_TO_WLG_BT;
+ }
+
coex_dm->cur_switch_status = switch_status;
if (coex_rfe->ant_switch_diversity &&
@@ -993,7 +1007,7 @@ static void rtw8821c_pwrtrack_set(struct rtw_dev *rtwdev)
s8 pwr_idx_offset_lower;
u8 channel = rtwdev->hal.current_channel;
u8 band_width = rtwdev->hal.current_band_width;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 tx_rate = dm_info->tx_rate;
u8 max_pwr_idx = rtwdev->chip->max_power_index;
@@ -1498,6 +1512,7 @@ static const struct rtw_intf_phy_para_table phy_para_table_8821c = {
static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8821c, 0, 0),
[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ [4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index f1789155e901..c409c8c29ec8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -15,6 +15,7 @@
#include "reg.h"
#include "debug.h"
#include "bf.h"
+#include "regd.h"
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -1436,7 +1437,7 @@ static void rtw8822b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
u8 pwr_idx_offset, tx_pwr_idx;
u8 channel = rtwdev->hal.current_channel;
u8 band_width = rtwdev->hal.current_band_width;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 tx_rate = dm_info->tx_rate;
u8 max_pwr_idx = rtwdev->chip->max_power_index;
@@ -1552,6 +1553,39 @@ static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
rtw_warn(rtwdev, "wrong bfee role\n");
}
+static void rtw8822b_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ rtw_phy_set_edcca_th(rtwdev, RTW8822B_EDCCA_MAX, RTW8822B_EDCCA_MAX);
+
+ /* mac edcca state setting */
+ rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA);
+ rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN);
+ rtw_write32_mask(rtwdev, REG_EDCCA_SOURCE, BIT_SOURCE_OPTION,
+ RTW8822B_EDCCA_SRC_DEF);
+ rtw_write32_mask(rtwdev, REG_EDCCA_POW_MA, BIT_MA_LEVEL, 0);
+
+ /* edcca decision opt */
+ rtw_write32_set(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION);
+}
+
+static void rtw8822b_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 l2h, h2l;
+ u8 igi;
+
+ igi = dm_info->igi_history[0];
+ if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) {
+ l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL;
+ } else {
+ l2h = min_t(s8, igi, dm_info->l2h_th_ini);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF;
+ }
+
+ rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
+}
+
static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -2125,6 +2159,8 @@ static struct rtw_chip_ops rtw8822b_ops = {
.config_bfee = rtw8822b_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .adaptivity_init = rtw8822b_adaptivity_init,
+ .adaptivity = rtw8822b_adaptivity,
.coex_set_init = rtw8822b_coex_cfg_init,
.coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
@@ -2454,6 +2490,11 @@ static const struct rtw_reg_domain coex_info_hw_regs_8822b[] = {
{0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
};
+static struct rtw_hw_reg_offset rtw8822b_edcca_th[] = {
+ [EDCCA_TH_L2H_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE0}, .offset = 0},
+ [EDCCA_TH_H2L_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE1}, .offset = 0},
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -2502,6 +2543,9 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
.rx_ldpc = true,
+ .edcca_th = rtw8822b_edcca_th,
+ .l2h_th_ini_cs = 10 + EDCCA_IGI_BASE,
+ .l2h_th_ini_ad = -14 + EDCCA_IGI_BASE,
.coex_para_ver = 0x20070206,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 6211f4b547b9..3fff8b881854 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -140,6 +140,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+#define RTW8822B_EDCCA_MAX 0x7f
+#define RTW8822B_EDCCA_SRC_DEF 1
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
#define BIT_RX_PSEL_RST (BIT(28) | BIT(29))
@@ -152,11 +154,17 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_L1PKWT 0x840
#define REG_MRC 0x850
#define REG_CLKTRK 0x860
+#define REG_EDCCA_POW_MA 0x8a0
+#define BIT_MA_LEVEL GENMASK(1, 0)
#define REG_ADCCLK 0x8ac
#define REG_ADC160 0x8c4
#define REG_ADC40 0x8c8
+#define REG_EDCCA_DECISION 0x8dc
+#define BIT_EDCCA_OPTION BIT(5)
#define REG_CDDTXP 0x93c
#define REG_TXPSEL1 0x940
+#define REG_EDCCA_SOURCE 0x944
+#define BIT_SOURCE_OPTION GENMASK(29, 28)
#define REG_ACBB0 0x948
#define REG_ACBBRXFIR 0x94c
#define REG_ACGG2TBL 0x958
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index f3ad079967a6..46b881e8e4fe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -4497,6 +4497,39 @@ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
+static void rtw8822c_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ rtw_phy_set_edcca_th(rtwdev, RTW8822C_EDCCA_MAX, RTW8822C_EDCCA_MAX);
+
+ /* mac edcca state setting */
+ rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA);
+ rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN);
+
+ /* edcca decision opt */
+ rtw_write32_clr(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION);
+}
+
+static void rtw8822c_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 l2h, h2l;
+ u8 igi;
+
+ igi = dm_info->igi_history[0];
+ if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) {
+ l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL;
+ } else {
+ if (igi < dm_info->l2h_th_ini - EDCCA_ADC_BACKOFF)
+ l2h = igi + EDCCA_ADC_BACKOFF;
+ else
+ l2h = dm_info->l2h_th_ini;
+ h2l = l2h - EDCCA_L2H_H2L_DIFF;
+ }
+
+ rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
+}
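A worked example of the threshold math shared by the 8822b and 8822c variants, assuming for illustration that the constants from phy.h take the values EDCCA_IGI_L2H_DIFF = 8, EDCCA_TH_L2H_LB = 48, EDCCA_L2H_H2L_DIFF_NORMAL = 8, EDCCA_L2H_H2L_DIFF = 7 and EDCCA_ADC_BACKOFF = 12 (their real definitions live outside this hunk): with an IGI reading of 50 in normal mode, l2h = max(50 + 8, 48) = 58 and h2l = 58 - 8 = 50; in adaptivity mode on 8822c with l2h_th_ini = 45, igi (50) is not below 45 - 12 = 33, so l2h is clamped to 45 and h2l = 45 - 7 = 38.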
+
static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -4912,6 +4945,8 @@ static struct rtw_chip_ops rtw8822c_ops = {
.config_bfee = rtw8822c_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .adaptivity_init = rtw8822c_adaptivity_init,
+ .adaptivity = rtw8822c_adaptivity,
.cfo_init = rtw8822c_cfo_init,
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
@@ -5197,6 +5232,15 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
};
+static struct rtw_hw_reg_offset rtw8822c_edcca_th[] = {
+ [EDCCA_TH_L2H_IDX] = {
+ {.addr = 0x84c, .mask = MASKBYTE2}, .offset = 0x80
+ },
+ [EDCCA_TH_H2L_IDX] = {
+ {.addr = 0x84c, .mask = MASKBYTE3}, .offset = 0x80
+ },
+};
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -5289,6 +5333,9 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bfer_mu_max_num = 1,
.rx_ldpc = true,
.tx_stbc = true,
+ .edcca_th = rtw8822c_edcca_th,
+ .l2h_th_ini_cs = 60,
+ .l2h_th_ini_ad = 45,
#ifdef CONFIG_PM
.wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 364afc6d851b..3df627419d81 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -162,6 +162,7 @@ const struct rtw_table name ## _tbl = { \
#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+#define RTW8822C_EDCCA_MAX 0x7f
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
#define XCAP_MASK GENMASK(6, 0)
@@ -174,6 +175,8 @@ const struct rtw_table name ## _tbl = { \
#define REG_ANTMAP0 0x820
#define BIT_ANT_PATH GENMASK(1, 0)
#define REG_ANTMAP 0x824
+#define REG_EDCCA_DECISION 0x844
+#define BIT_EDCCA_OPTION GENMASK(30, 29)
#define REG_DYMPRITH 0x86c
#define REG_DYMENTH0 0x870
#define REG_DYMENTH 0x874
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index a48e616e0fb9..6bfaab48b507 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -399,6 +399,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
tx_params = (struct skb_info *)info->driver_data;
+ /* info->driver_data and info->control are part of a union, so make a copy */
+ tx_params->have_key = !!info->control.hw_key;
wh = (struct ieee80211_hdr *)&skb->data[0];
tx_params->sta_id = 0;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index f4a26f16f00f..dca81a4bbdd7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
- info->control.hw_key) {
+ tx_params->have_key) {
if (rsi_is_cipher_wep(common))
ieee80211_size += 4;
else
@@ -214,15 +214,17 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
RSI_WIFI_DATA_Q);
data_desc->header_len = ieee80211_size;
- if (common->min_rate != RSI_RATE_AUTO) {
+ if (common->rate_config[common->band].fixed_enabled) {
/* Send fixed rate */
+ u16 fixed_rate = common->rate_config[common->band].fixed_hw_rate;
+
data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
- data_desc->rate_info = cpu_to_le16(common->min_rate);
+ data_desc->rate_info = cpu_to_le16(fixed_rate);
if (conf_is_ht40(&common->priv->hw->conf))
data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE);
- if ((common->vif_info[0].sgi) && (common->min_rate & 0x100)) {
+ if (common->vif_info[0].sgi && (fixed_rate & 0x100)) {
/* Only MCS rates */
data_desc->rate_info |=
cpu_to_le16(ENABLE_SHORTGI_RATE);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index b66975f54567..e70c1c7fdf59 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -510,7 +510,6 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO)) {
rsi_send_rx_filter_frame(common, DISALLOW_BEACONS);
- common->min_rate = RSI_RATE_AUTO;
for (i = 0; i < common->max_stations; i++)
common->stations[i].sta = NULL;
}
@@ -1228,20 +1227,32 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ const unsigned int mcs_offset = ARRAY_SIZE(rsi_rates);
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- enum nl80211_band band = hw->conf.chandef.chan->band;
+ int i;
mutex_lock(&common->mutex);
- common->fixedrate_mask[band] = 0;
- if (mask->control[band].legacy == 0xfff) {
- common->fixedrate_mask[band] =
- (mask->control[band].ht_mcs[0] << 12);
- } else {
- common->fixedrate_mask[band] =
- mask->control[band].legacy;
+ for (i = 0; i < ARRAY_SIZE(common->rate_config); i++) {
+ struct rsi_rate_config *cfg = &common->rate_config[i];
+ u32 bm;
+
+ bm = mask->control[i].legacy | (mask->control[i].ht_mcs[0] << mcs_offset);
+ if (hweight32(bm) == 1) { /* single rate */
+ int rate_index = ffs(bm) - 1;
+
+ if (rate_index < mcs_offset)
+ cfg->fixed_hw_rate = rsi_rates[rate_index].hw_value;
+ else
+ cfg->fixed_hw_rate = rsi_mcsrates[rate_index - mcs_offset];
+ cfg->fixed_enabled = true;
+ } else {
+ cfg->configured_mask = bm;
+ cfg->fixed_enabled = false;
+ }
}
+
mutex_unlock(&common->mutex);
return 0;
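The mask is folded into one bitmap per band: legacy rates occupy the low bit positions and HT MCS bits follow at mcs_offset. A worked sketch of the single-rate path, assuming rsi_rates has 12 legacy entries so MCS0 lands at bit 12 (the table is defined outside this hunk):

	u32 bm = BIT(14);			/* only HT MCS2 requested */

	if (hweight32(bm) == 1) {		/* exactly one rate set */
		int rate_index = ffs(bm) - 1;	/* rate_index = 14 */
		/* 14 >= 12, so the fixed rate is rsi_mcsrates[14 - 12], i.e. MCS2 */
	}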
@@ -1378,46 +1389,6 @@ void rsi_indicate_pkt_to_os(struct rsi_common *common,
ieee80211_rx_irqsafe(hw, skb);
}
-static void rsi_set_min_rate(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct rsi_common *common)
-{
- u8 band = hw->conf.chandef.chan->band;
- u8 ii;
- u32 rate_bitmap;
- bool matched = false;
-
- common->bitrate_mask[band] = sta->supp_rates[band];
-
- rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]);
-
- if (rate_bitmap & 0xfff) {
- /* Find out the min rate */
- for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
- if (rate_bitmap & BIT(ii)) {
- common->min_rate = rsi_rates[ii].hw_value;
- matched = true;
- break;
- }
- }
- }
-
- common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
-
- if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) {
- for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) {
- if ((rate_bitmap >> 12) & BIT(ii)) {
- common->min_rate = rsi_mcsrates[ii];
- matched = true;
- break;
- }
- }
- }
-
- if (!matched)
- common->min_rate = 0xffff;
-}
-
/**
* rsi_mac80211_sta_add() - This function notifies driver about a peer getting
* connected.
@@ -1516,9 +1487,9 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) {
- rsi_set_min_rate(hw, sta, common);
+ common->bitrate_mask[common->band] = sta->supp_rates[common->band];
+ common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
if (sta->ht_cap.ht_supported) {
- common->vif_info[0].is_ht = true;
common->bitrate_mask[NL80211_BAND_2GHZ] =
sta->supp_rates[NL80211_BAND_2GHZ];
if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
@@ -1592,7 +1563,6 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
bss->qos = sta->wme;
common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
- common->min_rate = 0xffff;
common->vif_info[0].is_ht = false;
common->vif_info[0].sgi = false;
common->vif_info[0].seq_start = 0;
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index d98483298555..143224a3802b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -211,9 +211,10 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len)
bt_pkt_type = frame_desc[offset + BT_RX_PKT_TYPE_OFST];
if (bt_pkt_type == BT_CARD_READY_IND) {
rsi_dbg(INFO_ZONE, "BT Card ready recvd\n");
- if (rsi_bt_ops.attach(common, &g_proto_ops))
- rsi_dbg(ERR_ZONE,
- "Failed to attach BT module\n");
+ if (common->fsm_state == FSM_MAC_INIT_DONE)
+ rsi_attach_bt(common);
+ else
+ common->bt_defer_attach = true;
} else {
if (common->bt_adapter)
rsi_bt_ops.recv_pkt(common->bt_adapter,
@@ -278,6 +279,15 @@ void rsi_set_bt_context(void *priv, void *bt_context)
}
#endif
+void rsi_attach_bt(struct rsi_common *common)
+{
+#ifdef CONFIG_RSI_COEX
+ if (rsi_bt_ops.attach(common, &g_proto_ops))
+ rsi_dbg(ERR_ZONE,
+ "Failed to attach BT module\n");
+#endif
+}
+
/**
* rsi_91x_init() - This function initializes os interface operations.
* @oper_mode: One of DEV_OPMODE_*.
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 891fd5f0fa76..0848f7a7e76c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -276,7 +276,7 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->channel_width = BW_20MHZ;
common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
common->channel = 1;
- common->min_rate = 0xffff;
+ memset(&common->rate_config, 0, sizeof(common->rate_config));
common->fsm_state = FSM_CARD_NOT_READY;
common->iface_down = true;
common->endpoint = EP_2GHZ_20MHZ;
@@ -1314,7 +1314,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
u8 band = hw->conf.chandef.chan->band;
u8 num_supported_rates = 0;
u8 rate_table_offset, rate_offset = 0;
- u32 rate_bitmap;
+ u32 rate_bitmap, configured_rates;
u16 *selected_rates, min_rate;
bool is_ht = false, is_sgi = false;
u16 frame_len = sizeof(struct rsi_auto_rate);
@@ -1364,6 +1364,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
is_sgi = true;
}
+ /* Limit to any rates administratively configured by cfg80211 */
+ configured_rates = common->rate_config[band].configured_mask ?: 0xffffffff;
+ rate_bitmap &= configured_rates;
+
if (band == NL80211_BAND_2GHZ) {
if ((rate_bitmap == 0) && (is_ht))
min_rate = RSI_RATE_MCS0;
@@ -1389,10 +1393,13 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
num_supported_rates = jj;
if (is_ht) {
- for (ii = 0; ii < ARRAY_SIZE(mcs); ii++)
- selected_rates[jj++] = mcs[ii];
- num_supported_rates += ARRAY_SIZE(mcs);
- rate_offset += ARRAY_SIZE(mcs);
+ for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) {
+ if (configured_rates & BIT(ii + ARRAY_SIZE(rsi_rates))) {
+ selected_rates[jj++] = mcs[ii];
+ num_supported_rates++;
+ rate_offset++;
+ }
+ }
}
sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
@@ -1482,7 +1489,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
qos_enable,
aid, sta_id,
vif);
- if (common->min_rate == 0xffff)
+ if (!common->rate_config[common->band].fixed_enabled)
rsi_send_auto_rate_request(common, sta, sta_id, vif);
if (opmode == RSI_OPMODE_STA &&
!(assoc_cap & WLAN_CAPABILITY_PRIVACY) &&
@@ -2071,6 +2078,9 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
if (common->reinit_hw) {
complete(&common->wlan_init_completion);
} else {
+ if (common->bt_defer_attach)
+ rsi_attach_bt(common);
+
return rsi_mac80211_attach(common);
}
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index e0c502bc4270..9f16128e4ffa 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -24,10 +24,7 @@
/* Default operating mode is wlan STA + BT */
static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL;
module_param(dev_oper_mode, ushort, 0444);
-MODULE_PARM_DESC(dev_oper_mode,
- "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n"
- "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
- "6[AP + BT classic], 14[AP + BT classic + BT LE]");
+MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC);
/**
* rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 416976f09888..6a120211800d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -25,10 +25,7 @@
/* Default operating mode is wlan STA + BT */
static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL;
module_param(dev_oper_mode, ushort, 0444);
-MODULE_PARM_DESC(dev_oper_mode,
- "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n"
- "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
- "6[AP + BT classic], 14[AP + BT classic + BT LE]");
+MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC);
static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags);
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index d044a440fa08..5b07262a9740 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -28,6 +28,17 @@
#define DEV_OPMODE_AP_BT 6
#define DEV_OPMODE_AP_BT_DUAL 14
+#define DEV_OPMODE_PARAM_DESC \
+ __stringify(DEV_OPMODE_WIFI_ALONE) "[Wi-Fi alone], " \
+ __stringify(DEV_OPMODE_BT_ALONE) "[BT classic alone], " \
+ __stringify(DEV_OPMODE_BT_LE_ALONE) "[BT LE alone], " \
+ __stringify(DEV_OPMODE_BT_DUAL) "[BT classic + BT LE alone], " \
+ __stringify(DEV_OPMODE_STA_BT) "[Wi-Fi STA + BT classic], " \
+ __stringify(DEV_OPMODE_STA_BT_LE) "[Wi-Fi STA + BT LE], " \
+ __stringify(DEV_OPMODE_STA_BT_DUAL) "[Wi-Fi STA + BT classic + BT LE], " \
+ __stringify(DEV_OPMODE_AP_BT) "[Wi-Fi AP + BT classic], " \
+ __stringify(DEV_OPMODE_AP_BT_DUAL) "[Wi-Fi AP + BT classic + BT LE]"
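With the opmode values defined just above this hunk, the macro expands via __stringify() to the same text the SDIO and USB drivers previously hard-coded, e.g. "1[Wi-Fi alone], 4[BT classic alone], 8[BT LE alone], ... 6[Wi-Fi AP + BT classic], 14[Wi-Fi AP + BT classic + BT LE]", so the two copies of the description can no longer drift apart.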
+
#define FLASH_WRITE_CHUNK_SIZE (4 * 1024)
#define FLASH_SECTOR_SIZE (4 * 1024)
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 0f535850a383..dcf8fb40698b 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -61,6 +61,7 @@ enum RSI_FSM_STATES {
extern u32 rsi_zone_enabled;
extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
+#define RSI_MAX_BANDS 2
#define RSI_MAX_VIFS 3
#define NUM_EDCA_QUEUES 4
#define IEEE80211_ADDR_LEN 6
@@ -139,6 +140,7 @@ struct skb_info {
u8 internal_hdr_size;
struct ieee80211_vif *vif;
u8 vap_id;
+ bool have_key;
};
enum edca_queue {
@@ -229,6 +231,12 @@ struct rsi_9116_features {
u32 ps_options;
};
+struct rsi_rate_config {
+ u32 configured_mask; /* configured by mac80211: bits 0-11 legacy, bit 12 up MCS */
+ u16 fixed_hw_rate;
+ bool fixed_enabled;
+};
+
struct rsi_common {
struct rsi_hw *priv;
struct vif_priv vif_info[RSI_MAX_VIFS];
@@ -254,8 +262,8 @@ struct rsi_common {
u8 channel_width;
u16 rts_threshold;
- u16 bitrate_mask[2];
- u32 fixedrate_mask[2];
+ u32 bitrate_mask[RSI_MAX_BANDS];
+ struct rsi_rate_config rate_config[RSI_MAX_BANDS];
u8 rf_reset;
struct transmit_q_stats tx_stats;
@@ -276,7 +284,6 @@ struct rsi_common {
u8 mac_id;
u8 radio_id;
u16 rate_pwr[20];
- u16 min_rate;
/* WMM algo related */
u8 selected_qnum;
@@ -320,6 +327,7 @@ struct rsi_common {
struct ieee80211_vif *roc_vif;
bool eapol4_confirm;
+ bool bt_defer_attach;
void *bt_adapter;
struct cfg80211_scan_request *hwscan;
@@ -401,5 +409,6 @@ struct rsi_host_intf_ops {
enum rsi_host_intf rsi_get_host_intf(void *priv);
void rsi_set_bt_context(void *priv, void *bt_context);
+void rsi_attach_bt(struct rsi_common *common);
#endif
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index a7ceef10bf6a..850c26bc9524 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -65,7 +65,6 @@ static const struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
- { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe501), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 77dbfc418bce..17543be14665 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -71,6 +71,7 @@ config RPMSG_WWAN_CTRL
config IOSM
tristate "IOSM Driver for Intel M.2 WWAN Device"
depends on INTEL_IOMMU
+ select NET_DEVLINK
help
This driver enables Intel M.2 WWAN Device communication.
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile
index 4f9f0ae398e1..b838034bb120 100644
--- a/drivers/net/wwan/iosm/Makefile
+++ b/drivers/net/wwan/iosm/Makefile
@@ -18,6 +18,9 @@ iosm-y = \
iosm_ipc_protocol.o \
iosm_ipc_protocol_ops.o \
iosm_ipc_mux.o \
- iosm_ipc_mux_codec.o
+ iosm_ipc_mux_codec.o \
+ iosm_ipc_devlink.o \
+ iosm_ipc_flash.o \
+ iosm_ipc_coredump.o
obj-$(CONFIG_IOSM) := iosm.o
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
index 519361ec40df..128c999e08bb 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -8,7 +8,7 @@
#include "iosm_ipc_chnl_cfg.h"
/* Max. sizes of downlink buffers */
-#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (64 * 1024)
#define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024)
#define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048
#define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024)
@@ -60,6 +60,10 @@ static struct ipc_chnl_cfg modem_cfg[] = {
{ IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13,
IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM,
IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM },
+ /* Flash Channel/Coredump Channel */
+ { IPC_MEM_CTRL_CHL_ID_7, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
+ IPC_MEM_MAX_TDS_FLASH_UL, IPC_MEM_MAX_TDS_FLASH_DL,
+ IPC_MEM_MAX_DL_FLASH_BUF_SIZE, WWAN_PORT_UNKNOWN },
};
int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
index 422471367f78..e77084e76718 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
@@ -23,6 +23,7 @@ enum ipc_channel_id {
IPC_MEM_CTRL_CHL_ID_4,
IPC_MEM_CTRL_CHL_ID_5,
IPC_MEM_CTRL_CHL_ID_6,
+ IPC_MEM_CTRL_CHL_ID_7,
};
/**
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
new file mode 100644
index 000000000000..9acd87724c9d
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include "iosm_ipc_coredump.h"
+
+/**
+ * ipc_coredump_collect - Collect coredump data for a region
+ * @devlink: Pointer to devlink instance.
+ * @data: Pointer to snapshot
+ * @entry: ID of requested snapshot
+ * @region_size: Region size
+ *
+ * Returns: 0 on success, error on failure
+ */
+int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry,
+ u32 region_size)
+{
+ int ret, bytes_to_read, bytes_read = 0, i = 0;
+ s32 remaining;
+ u8 *data_ptr;
+
+ data_ptr = vmalloc(region_size);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ remaining = devlink->cd_file_info[entry].actual_size;
+ ret = ipc_devlink_send_cmd(devlink, rpsi_cmd_coredump_get, entry);
+ if (ret) {
+ dev_err(devlink->dev, "Send coredump_get cmd failed");
+ goto get_cd_fail;
+ }
+ while (remaining > 0) {
+ bytes_to_read = min(remaining, MAX_DATA_SIZE);
+ bytes_read = 0;
+ ret = ipc_imem_sys_devlink_read(devlink, data_ptr + i,
+ bytes_to_read, &bytes_read);
+ if (ret) {
+ dev_err(devlink->dev, "CD data read failed");
+ goto get_cd_fail;
+ }
+ remaining -= bytes_read;
+ i += bytes_read;
+ }
+
+ *data = data_ptr;
+
+ return 0;
+
+get_cd_fail:
+ vfree(data_ptr);
+ return ret;
+}
+
+/**
+ * ipc_coredump_get_list - Get coredump list from modem
+ * @devlink: Pointer to devlink instance.
+ * @cmd: RPSI command to be sent
+ *
+ * Returns: 0 on success, error on failure
+ */
+int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd)
+{
+ u32 byte_read, num_entries, file_size;
+ struct iosm_cd_table *cd_table;
+ u8 size[MAX_SIZE_LEN], i;
+ char *filename;
+ int ret;
+
+ cd_table = kzalloc(MAX_CD_LIST_SIZE, GFP_KERNEL);
+ if (!cd_table) {
+ ret = -ENOMEM;
+ goto cd_init_fail;
+ }
+
+ ret = ipc_devlink_send_cmd(devlink, cmd, MAX_CD_LIST_SIZE);
+ if (ret) {
+ dev_err(devlink->dev, "rpsi_cmd_coredump_start failed");
+ goto cd_init_fail;
+ }
+
+ ret = ipc_imem_sys_devlink_read(devlink, (u8 *)cd_table,
+ MAX_CD_LIST_SIZE, &byte_read);
+ if (ret) {
+ dev_err(devlink->dev, "Coredump data is invalid");
+ goto cd_init_fail;
+ }
+
+ if (byte_read != MAX_CD_LIST_SIZE) {
+ /* short read: fail explicitly instead of returning 0 */
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ if (cmd == rpsi_cmd_coredump_start) {
+ num_entries = le32_to_cpu(cd_table->list.num_entries);
+ if (num_entries == 0 || num_entries > IOSM_NOF_CD_REGION) {
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ file_size = le32_to_cpu(cd_table->list.entry[i].size);
+ filename = cd_table->list.entry[i].filename;
+
+ if (file_size > devlink->cd_file_info[i].default_size) {
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ devlink->cd_file_info[i].actual_size = file_size;
+ dev_dbg(devlink->dev, "file: %s actual size %d",
+ filename, file_size);
+ devlink_flash_update_status_notify(devlink->devlink_ctx,
+ filename,
+ "FILENAME", 0, 0);
+ snprintf(size, sizeof(size), "%d", file_size);
+ devlink_flash_update_status_notify(devlink->devlink_ctx,
+ size, "FILE SIZE",
+ 0, 0);
+ }
+ }
+
+cd_init_fail:
+ kfree(cd_table);
+ return ret;
+}
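Taken together, the two helpers implement a start/collect/end protocol driven by rpsi commands. A hypothetical direct-call sequence for a single region (in the patch itself this is driven from the devlink region snapshot op; error handling elided, dl is an iosm_devlink pointer):

	u8 *data;

	ipc_coredump_get_list(dl, rpsi_cmd_coredump_start);	/* fetch file list */
	ipc_coredump_collect(dl, &data, 0,			/* region/entry 0 */
			     dl->cd_file_info[0].default_size);
	ipc_coredump_get_list(dl, rpsi_cmd_coredump_end);	/* stop collection */
	/* the caller owns 'data'; the region destructor vfree()s it */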
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
new file mode 100644
index 000000000000..0809ba664276
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_COREDUMP_H_
+#define _IOSM_IPC_COREDUMP_H_
+
+#include "iosm_ipc_devlink.h"
+
+/* Max number of bytes to receive for Coredump list structure */
+#define MAX_CD_LIST_SIZE 0x1000
+
+/* Max buffer allocated to receive coredump data */
+#define MAX_DATA_SIZE 0x00010000
+
+/* Max number of file entries */
+#define MAX_NOF_ENTRY 256
+
+/* Max length of the file size string */
+#define MAX_SIZE_LEN 32
+
+/**
+ * struct iosm_cd_list_entry - Structure to hold coredump file info.
+ * @size: Number of bytes for the entry
+ * @filename: Coredump filename to be generated on host
+ */
+struct iosm_cd_list_entry {
+ __le32 size;
+ char filename[IOSM_MAX_FILENAME_LEN];
+} __packed;
+
+/**
+ * struct iosm_cd_list - Structure to hold list of coredump files
+ * to be collected.
+ * @num_entries: Number of entries to be received
+ * @entry: Contains File info
+ */
+struct iosm_cd_list {
+ __le32 num_entries;
+ struct iosm_cd_list_entry entry[MAX_NOF_ENTRY];
+} __packed;
+
+/**
+ * struct iosm_cd_table - Common Coredump table
+ * @version: Version of coredump structure
+ * @list: Coredump list structure
+ */
+struct iosm_cd_table {
+ __le32 version;
+ struct iosm_cd_list list;
+} __packed;
+
+int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry,
+ u32 region_size);
+
+int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd);
+
+#endif /* _IOSM_IPC_COREDUMP_H_ */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
new file mode 100644
index 000000000000..17da85a8f337
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_coredump.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
+
+/* Coredump list */
+static struct iosm_coredump_file_info list[IOSM_NOF_CD_REGION] = {
+ {"report.json", REPORT_JSON_SIZE,},
+ {"coredump.fcd", COREDUMP_FCD_SIZE,},
+ {"cdd.log", CDD_LOG_SIZE,},
+ {"eeprom.bin", EEPROM_BIN_SIZE,},
+ {"bootcore_trace.bin", BOOTCORE_TRC_BIN_SIZE,},
+ {"bootcore_prev_trace.bin", BOOTCORE_PREV_TRC_BIN_SIZE,},
+};
+
+/* Get the param values for the specific param IDs */
+static int ipc_devlink_get_param(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
+ ctx->val.vu8 = ipc_devlink->param.erase_full_flash;
+
+ return 0;
+}
+
+/* Set the param values for the specific param IDs */
+static int ipc_devlink_set_param(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
+ ipc_devlink->param.erase_full_flash = ctx->val.vu8;
+
+ return 0;
+}
+
+/* Devlink param structure array */
+static const struct devlink_param iosm_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
+ "erase_full_flash", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ipc_devlink_get_param, ipc_devlink_set_param,
+ NULL),
+};
+
+/* Get devlink flash component type */
+static enum iosm_flash_comp_type
+ipc_devlink_get_flash_comp_type(const char comp_str[], u32 len)
+{
+ enum iosm_flash_comp_type fls_type;
+
+ if (!strncmp("PSI", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_PSI;
+ else if (!strncmp("EBL", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_EBL;
+ else if (!strncmp("FLS", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_FLS;
+ else
+ fls_type = FLASH_COMP_TYPE_INVAL;
+
+ return fls_type;
+}
+
+/* Flash update handler invoked by the devlink flash command; it
+ * dispatches to the appropriate flashing routine based on the
+ * component type specified in the image header.
+ */
+static int ipc_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(devlink);
+ enum iosm_flash_comp_type fls_type;
+ struct iosm_devlink_image *header;
+ int rc = -EINVAL;
+ u8 *mdm_rsp;
+
+ header = (struct iosm_devlink_image *)params->fw->data;
+
+ if (!header || params->fw->size <= IOSM_DEVLINK_HDR_SIZE ||
+ (memcmp(header->magic_header, IOSM_DEVLINK_MAGIC_HEADER,
+ IOSM_DEVLINK_MAGIC_HEADER_LEN) != 0))
+ return -EINVAL;
+
+ mdm_rsp = kzalloc(IOSM_EBL_DW_PACK_SIZE, GFP_KERNEL);
+ if (!mdm_rsp)
+ return -ENOMEM;
+
+ fls_type = ipc_devlink_get_flash_comp_type(header->image_type,
+ IOSM_DEVLINK_MAX_IMG_LEN);
+
+ switch (fls_type) {
+ case FLASH_COMP_TYPE_PSI:
+ rc = ipc_flash_boot_psi(ipc_devlink, params->fw);
+ break;
+ case FLASH_COMP_TYPE_EBL:
+ rc = ipc_flash_boot_ebl(ipc_devlink, params->fw);
+ if (rc)
+ break;
+ rc = ipc_flash_boot_set_capabilities(ipc_devlink, mdm_rsp);
+ if (rc)
+ break;
+ rc = ipc_flash_read_swid(ipc_devlink, mdm_rsp);
+ break;
+ case FLASH_COMP_TYPE_FLS:
+ rc = ipc_flash_send_fls(ipc_devlink, params->fw, mdm_rsp);
+ break;
+ default:
+ devlink_flash_update_status_notify(devlink, "Invalid component",
+ NULL, 0, 0);
+ break;
+ }
+
+ if (!rc)
+ devlink_flash_update_status_notify(devlink, "Flashing success",
+ header->image_type, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ header->image_type, 0, 0);
+
+ kfree(mdm_rsp);
+ return rc;
+}
+
+/* Call back function for devlink ops */
+static const struct devlink_ops devlink_flash_ops = {
+ .flash_update = ipc_devlink_flash_update,
+};
+
+/**
+ * ipc_devlink_send_cmd - Send command to Modem
+ * @ipc_devlink: Pointer to struct iosm_devlink
+ * @cmd: Command to be sent to modem
+ * @entry: Command entry number
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry)
+{
+ struct iosm_rpsi_cmd rpsi_cmd;
+
+ rpsi_cmd.param.dword = cpu_to_le32(entry);
+ rpsi_cmd.cmd = cpu_to_le16(cmd);
+ rpsi_cmd.crc = rpsi_cmd.param.word[0] ^ rpsi_cmd.param.word[1] ^
+ rpsi_cmd.cmd;
+
+ return ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&rpsi_cmd,
+ sizeof(rpsi_cmd));
+}
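The "crc" here is a simple XOR over the three 16-bit words of the command. For example, for cmd = rpsi_cmd_coredump_get (0x11) with entry = 0x2: param.dword = cpu_to_le32(2) gives word[0] = 0x0002 and word[1] = 0x0000, so crc = 0x0002 ^ 0x0000 ^ 0x0011 = 0x0013, with all fields little-endian on the wire.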
+
+/* Function to create snapshot */
+static int ipc_devlink_coredump_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+ struct iosm_coredump_file_info *cd_list = ops->priv;
+ u32 region_size;
+ int rc;
+
+ dev_dbg(ipc_devlink->dev, "Region:%s, ID:%d", ops->name,
+ cd_list->entry);
+ region_size = cd_list->default_size;
+ rc = ipc_coredump_collect(ipc_devlink, data, cd_list->entry,
+ region_size);
+ if (rc) {
+ dev_err(ipc_devlink->dev, "Fail to create snapshot,err %d", rc);
+ goto coredump_collect_err;
+ }
+
+ /* Send coredump end cmd indicating end of coredump collection */
+ if (cd_list->entry == (IOSM_NOF_CD_REGION - 1))
+ ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
+
+ return 0;
+
+coredump_collect_err:
+ ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
+ return rc;
+}
+
+/* To create regions for coredump files */
+static int ipc_devlink_create_region(struct iosm_devlink *devlink)
+{
+ struct devlink_region_ops *mdm_coredump;
+ int rc = 0;
+ int i;
+
+ mdm_coredump = devlink->iosm_devlink_mdm_coredump;
+ for (i = 0; i < IOSM_NOF_CD_REGION; i++) {
+ mdm_coredump[i].name = list[i].filename;
+ mdm_coredump[i].snapshot = ipc_devlink_coredump_snapshot;
+ mdm_coredump[i].destructor = vfree;
+ devlink->cd_regions[i] =
+ devlink_region_create(devlink->devlink_ctx,
+ &mdm_coredump[i], MAX_SNAPSHOTS,
+ list[i].default_size);
+
+ if (IS_ERR(devlink->cd_regions[i])) {
+ rc = PTR_ERR(devlink->cd_regions[i]);
+ dev_err(devlink->dev, "Devlink region fail,err %d", rc);
+ /* Delete only the previously created regions; entry i
+ * holds an ERR_PTR and must not be destroyed.
+ */
+ for (i--; i >= 0; i--)
+ devlink_region_destroy(devlink->cd_regions[i]);
+ goto region_create_fail;
+ }
+ list[i].entry = i;
+ mdm_coredump[i].priv = list + i;
+ }
+region_create_fail:
+ return rc;
+}
+
+/* To Destroy devlink regions */
+static void ipc_devlink_destroy_region(struct iosm_devlink *ipc_devlink)
+{
+ u8 i;
+
+ for (i = 0; i < IOSM_NOF_CD_REGION; i++)
+ devlink_region_destroy(ipc_devlink->cd_regions[i]);
+}
+
+/**
+ * ipc_devlink_init - Initialize/register devlink to IOSM driver
+ * @ipc_imem: Pointer to struct iosm_imem
+ *
+ * Returns: Pointer to iosm_devlink on success and NULL on failure
+ */
+struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
+{
+ struct ipc_chnl_cfg chnl_cfg_flash = { 0 };
+ struct iosm_devlink *ipc_devlink;
+ struct devlink *devlink_ctx;
+ int rc;
+
+ devlink_ctx = devlink_alloc(&devlink_flash_ops,
+ sizeof(struct iosm_devlink),
+ ipc_imem->dev);
+ if (!devlink_ctx) {
+ dev_err(ipc_imem->dev, "devlink_alloc failed");
+ goto devlink_alloc_fail;
+ }
+
+ ipc_devlink = devlink_priv(devlink_ctx);
+ ipc_devlink->devlink_ctx = devlink_ctx;
+ ipc_devlink->pcie = ipc_imem->pcie;
+ ipc_devlink->dev = ipc_imem->dev;
+
+ rc = devlink_params_register(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+ if (rc) {
+ dev_err(ipc_devlink->dev,
+ "devlink_params_register failed. rc %d", rc);
+ goto param_reg_fail;
+ }
+
+ ipc_devlink->cd_file_info = list;
+
+ rc = ipc_devlink_create_region(ipc_devlink);
+ if (rc) {
+ dev_err(ipc_devlink->dev, "Devlink Region create failed, rc %d",
+ rc);
+ goto region_create_fail;
+ }
+
+ if (ipc_chnl_cfg_get(&chnl_cfg_flash, IPC_MEM_CTRL_CHL_ID_7) < 0)
+ goto chnl_get_fail;
+
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
+ chnl_cfg_flash, IRQ_MOD_OFF);
+
+ init_completion(&ipc_devlink->devlink_sio.read_sem);
+ skb_queue_head_init(&ipc_devlink->devlink_sio.rx_list);
+
+ devlink_register(devlink_ctx);
+ dev_dbg(ipc_devlink->dev, "iosm devlink register success");
+
+ return ipc_devlink;
+
+chnl_get_fail:
+ ipc_devlink_destroy_region(ipc_devlink);
+region_create_fail:
+ devlink_params_unregister(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+param_reg_fail:
+ devlink_free(devlink_ctx);
+devlink_alloc_fail:
+ return NULL;
+}
+
+/**
+ * ipc_devlink_deinit - To uninitialize the devlink from the IOSM driver.
+ * @ipc_devlink: Devlink instance
+ */
+void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink)
+{
+ struct devlink *devlink_ctx = ipc_devlink->devlink_ctx;
+
+ devlink_unregister(devlink_ctx);
+ ipc_devlink_destroy_region(ipc_devlink);
+ devlink_params_unregister(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+ if (ipc_devlink->devlink_sio.devlink_read_pend) {
+ complete(&ipc_devlink->devlink_sio.read_sem);
+ complete(&ipc_devlink->devlink_sio.channel->ul_sem);
+ }
+ if (!ipc_devlink->devlink_sio.devlink_read_pend)
+ skb_queue_purge(&ipc_devlink->devlink_sio.rx_list);
+
+ ipc_imem_sys_devlink_close(ipc_devlink);
+ devlink_free(devlink_ctx);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.h b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
new file mode 100644
index 000000000000..35c2d013b9cc
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_DEVLINK_H_
+#define _IOSM_IPC_DEVLINK_H_
+
+#include <net/devlink.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_pcie.h"
+
+/* Image ext max len */
+#define IOSM_DEVLINK_MAX_IMG_LEN 3
+/* Magic Header */
+#define IOSM_DEVLINK_MAGIC_HEADER "IOSM_DEVLINK_HEADER"
+/* Magic Header len */
+#define IOSM_DEVLINK_MAGIC_HEADER_LEN 20
+/* Devlink image type */
+#define IOSM_DEVLINK_IMG_TYPE 4
+/* Reserve header size */
+#define IOSM_DEVLINK_RESERVED 34
+/* Devlink Image Header size */
+#define IOSM_DEVLINK_HDR_SIZE sizeof(struct iosm_devlink_image)
+/* MAX file name length */
+#define IOSM_MAX_FILENAME_LEN 32
+/* EBL response size */
+#define IOSM_EBL_RSP_SIZE 76
+/* MAX number of regions supported */
+#define IOSM_NOF_CD_REGION 6
+/* MAX number of SNAPSHOTS supported */
+#define MAX_SNAPSHOTS 1
+/* Default coredump file sizes */
+#define REPORT_JSON_SIZE 0x800
+#define COREDUMP_FCD_SIZE 0x10E00000
+#define CDD_LOG_SIZE 0x30000
+#define EEPROM_BIN_SIZE 0x10000
+#define BOOTCORE_TRC_BIN_SIZE 0x8000
+#define BOOTCORE_PREV_TRC_BIN_SIZE 0x20000
+
+/**
+ * enum iosm_devlink_param_id - Enum type to different devlink params
+ * @IOSM_DEVLINK_PARAM_ID_BASE: Devlink param base ID
+ * @IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: Set if full erase required
+ */
+
+enum iosm_devlink_param_id {
+ IOSM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
+};
+
+/**
+ * enum iosm_rpsi_cmd_code - Enum type for RPSI command list
+ * @rpsi_cmd_code_ebl: Command to load ebl
+ * @rpsi_cmd_coredump_start: Command to get list of files and
+ * file size info from PSI
+ * @rpsi_cmd_coredump_get: Command to get the coredump data
+ * @rpsi_cmd_coredump_end: Command to stop receiving the coredump
+ */
+enum iosm_rpsi_cmd_code {
+ rpsi_cmd_code_ebl = 0x02,
+ rpsi_cmd_coredump_start = 0x10,
+ rpsi_cmd_coredump_get = 0x11,
+ rpsi_cmd_coredump_end = 0x12,
+};
+
+/**
+ * enum iosm_flash_comp_type - Enum for different flash component types
+ * @FLASH_COMP_TYPE_PSI: PSI flash comp type
+ * @FLASH_COMP_TYPE_EBL: EBL flash comp type
+ * @FLASH_COMP_TYPE_FLS: FLS flash comp type
+ * @FLASH_COMP_TYPE_INVAL: Invalid flash comp type
+ */
+enum iosm_flash_comp_type {
+ FLASH_COMP_TYPE_PSI,
+ FLASH_COMP_TYPE_EBL,
+ FLASH_COMP_TYPE_FLS,
+ FLASH_COMP_TYPE_INVAL,
+};
+
+/**
+ * struct iosm_devlink_sio - SIO instance
+ * @rx_list: Downlink skbuf list received from CP
+ * @read_sem: Needed for the blocking read or downlink transfer
+ * @channel_id: Reserved channel id for flashing/CD collection to RAM
+ * @channel: Channel instance for flashing and coredump
+ * @devlink_read_pend: Check if read is pending
+ */
+struct iosm_devlink_sio {
+ struct sk_buff_head rx_list;
+ struct completion read_sem;
+ int channel_id;
+ struct ipc_mem_channel *channel;
+ u32 devlink_read_pend;
+};
+
+/**
+ * struct iosm_flash_params - List of flash params required for flashing
+ * @erase_full_flash: To set the flashing mode
+ * erase_full_flash = 1; full erase
+ * erase_full_flash = 0; no erase
+ * @erase_full_flash_done: Flag to check if it is a full erase
+ */
+struct iosm_flash_params {
+ u8 erase_full_flash;
+ u8 erase_full_flash_done;
+};
+
+/**
+ * struct iosm_devlink_image - Structure with Fls file header info
+ * @magic_header: Header of the firmware image
+ * @image_type: Firmware image type
+ * @region_address: Address of the region to be flashed
+ * @download_region: Field to identify if it is a region
+ * @last_region: Field to identify if it is last region
+ * @reserved: Reserved field
+ */
+struct iosm_devlink_image {
+ char magic_header[IOSM_DEVLINK_MAGIC_HEADER_LEN];
+ char image_type[IOSM_DEVLINK_IMG_TYPE];
+ __le32 region_address;
+ u8 download_region;
+ u8 last_region;
+ u8 reserved[IOSM_DEVLINK_RESERVED];
+} __packed;
+
+/**
+ * struct iosm_ebl_ctx_data - EBL ctx data used during flashing
+ * @ebl_sw_info_version: SWID version info obtained from EBL
+ * @m_ebl_resp: Buffer used to read and write the ebl data
+ */
+struct iosm_ebl_ctx_data {
+ u8 ebl_sw_info_version;
+ u8 m_ebl_resp[IOSM_EBL_RSP_SIZE];
+};
+
+/**
+ * struct iosm_coredump_file_info - Coredump file info
+ * @filename: Name of coredump file
+ * @default_size: Default size of coredump file
+ * @actual_size: Actual size of coredump file
+ * @entry: Index of the coredump file
+ */
+struct iosm_coredump_file_info {
+ char filename[IOSM_MAX_FILENAME_LEN];
+ u32 default_size;
+ u32 actual_size;
+ u32 entry;
+};
+
+/**
+ * struct iosm_devlink - IOSM Devlink structure
+ * @devlink_sio: SIO instance for read/write functionality
+ * @pcie: Pointer to PCIe component
+ * @dev: Pointer to device struct
+ * @devlink_ctx: Pointer to devlink context
+ * @param: Params required for flashing
+ * @ebl_ctx: Data to be read and written to Modem
+ * @cd_file_info: coredump file info
+ * @iosm_devlink_mdm_coredump: region ops for coredump collection
+ * @cd_regions: coredump regions
+ */
+struct iosm_devlink {
+ struct iosm_devlink_sio devlink_sio;
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ struct devlink *devlink_ctx;
+ struct iosm_flash_params param;
+ struct iosm_ebl_ctx_data ebl_ctx;
+ struct iosm_coredump_file_info *cd_file_info;
+ struct devlink_region_ops iosm_devlink_mdm_coredump[IOSM_NOF_CD_REGION];
+ struct devlink_region *cd_regions[IOSM_NOF_CD_REGION];
+};
+
+/**
+ * union iosm_rpsi_param_u - RPSI cmd param for CRC calculation
+ * @word: Words member used in CRC calculation
+ * @dword: Actual data
+ */
+union iosm_rpsi_param_u {
+ __le16 word[2];
+ __le32 dword;
+};
+
+/**
+ * struct iosm_rpsi_cmd - Structure for RPSI Command
+ * @param: Used to calculate CRC
+ * @cmd: Stores the RPSI command
+ * @crc: Stores the CRC value
+ */
+struct iosm_rpsi_cmd {
+ union iosm_rpsi_param_u param;
+ __le16 cmd;
+ __le16 crc;
+};
+
+struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem);
+
+void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink);
+
+int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry);
+
+#endif /* _IOSM_IPC_DEVLINK_H_ */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.c b/drivers/net/wwan/iosm/iosm_ipc_flash.c
new file mode 100644
index 000000000000..d890914aa349
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_flash.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include "iosm_ipc_coredump.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
+
+/* Pack the header for data to be sent to the modem from the payload,
+ * payload length and pack ID, computing the additive checksum.
+ */
+static int ipc_flash_proc_format_ebl_pack(struct iosm_flash_data *flash_req,
+ u32 pack_length, u16 pack_id,
+ u8 *payload, u32 payload_length)
+{
+ u16 checksum = pack_id;
+ u32 i;
+
+ if (payload_length + IOSM_EBL_HEAD_SIZE > pack_length)
+ return -EINVAL;
+
+ flash_req->pack_id = cpu_to_le16(pack_id);
+ flash_req->msg_length = cpu_to_le32(payload_length);
+ checksum += (payload_length >> IOSM_EBL_PAYL_SHIFT) +
+ (payload_length & IOSM_EBL_CKSM);
+
+ for (i = 0; i < payload_length; i++)
+ checksum += payload[i];
+
+ flash_req->checksum = cpu_to_le16(checksum);
+
+ return 0;
+}
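Assuming IOSM_EBL_PAYL_SHIFT is 8 and IOSM_EBL_CKSM is 0xff (both are defined in iosm_ipc_flash.h, outside this patch), the checksum is the pack ID plus both bytes of the payload length plus every payload byte. For a pack_id of 0x30 and a 2-byte payload {0x01, 0x02}: checksum = 0x30 + (2 >> 8) + (2 & 0xff) + 0x01 + 0x02 = 0x35.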
+
+/* Validate the response received from the modem and check the
+ * type of errors received.
+ */
+static int ipc_flash_proc_check_ebl_rsp(void *hdr_rsp, void *payload_rsp)
+{
+ struct iosm_ebl_error *err_info = payload_rsp;
+ u16 *rsp_code = hdr_rsp;
+ u32 i;
+
+ if (*rsp_code == IOSM_EBL_RSP_BUFF) {
+ for (i = 0; i < IOSM_MAX_ERRORS; i++) {
+ if (!err_info->error[i].error_code) {
+ pr_err("EBL: error_class = %d, error_code = %d",
+ err_info->error[i].error_class,
+ err_info->error[i].error_code);
+ }
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Send data to the modem */
+static int ipc_flash_send_data(struct iosm_devlink *ipc_devlink, u32 size,
+ u16 pack_id, u8 *payload, u32 payload_length)
+{
+ struct iosm_flash_data flash_req;
+ int ret;
+
+ ret = ipc_flash_proc_format_ebl_pack(&flash_req, size,
+ pack_id, payload, payload_length);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL2 pack failed for pack_id:%d",
+ pack_id);
+ goto ipc_free_payload;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&flash_req,
+ IOSM_EBL_HEAD_SIZE);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL Header write failed for Id:%x",
+ pack_id);
+ goto ipc_free_payload;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, payload, payload_length);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL Payload write failed for Id:%x",
+ pack_id);
+ }
+
+ipc_free_payload:
+ return ret;
+}
+
+/**
+ * ipc_flash_link_establish - Flash link establishment
+ * @ipc_imem: Pointer to struct iosm_imem
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_link_establish(struct iosm_imem *ipc_imem)
+{
+ u8 ler_data[IOSM_LER_RSP_SIZE];
+ u32 bytes_read;
+
+ /* Allocate channel for flashing/cd collection */
+ ipc_imem->ipc_devlink->devlink_sio.channel =
+ ipc_imem_sys_devlink_open(ipc_imem);
+
+ if (!ipc_imem->ipc_devlink->devlink_sio.channel)
+ goto chl_open_fail;
+
+ if (ipc_imem_sys_devlink_read(ipc_imem->ipc_devlink, ler_data,
+ IOSM_LER_RSP_SIZE, &bytes_read))
+ goto devlink_read_fail;
+
+ if (bytes_read != IOSM_LER_RSP_SIZE)
+ goto devlink_read_fail;
+
+ return 0;
+
+devlink_read_fail:
+ ipc_imem_sys_devlink_close(ipc_imem->ipc_devlink);
+chl_open_fail:
+ return -EIO;
+}
+
+/* Receive data from the modem */
+static int ipc_flash_receive_data(struct iosm_devlink *ipc_devlink, u32 size,
+ u8 *mdm_rsp)
+{
+ u8 mdm_rsp_hdr[IOSM_EBL_HEAD_SIZE];
+ u32 bytes_read;
+ int ret;
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp_hdr,
+ IOSM_EBL_HEAD_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
+ IOSM_EBL_HEAD_SIZE);
+ goto ipc_flash_recv_err;
+ }
+
+ if (bytes_read != IOSM_EBL_HEAD_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_recv_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp, size,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
+ size);
+ goto ipc_flash_recv_err;
+ }
+
+ if (bytes_read != size) {
+ ret = -EINVAL;
+ goto ipc_flash_recv_err;
+ }
+
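+ /* The response code follows the 16-bit checksum in the header,
+ * hence the two byte offset
+ */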
+ ret = ipc_flash_proc_check_ebl_rsp(mdm_rsp_hdr + 2, mdm_rsp);
+
+ipc_flash_recv_err:
+ return ret;
+}
+
+/* Function to send command to modem and receive response */
+static int ipc_flash_send_receive(struct iosm_devlink *ipc_devlink, u16 pack_id,
+ u8 *payload, u32 payload_length, u8 *mdm_rsp)
+{
+ size_t frame_len = IOSM_EBL_DW_PACK_SIZE;
+ int ret;
+
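+ /* FLASH_SET_PROT_CONF is the only command using the small write
+ * pack; all other transfers use the larger download pack
+ */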
+ if (pack_id == FLASH_SET_PROT_CONF)
+ frame_len = IOSM_EBL_W_PACK_SIZE;
+
+ ret = ipc_flash_send_data(ipc_devlink, frame_len, pack_id, payload,
+ payload_length);
+ if (ret)
+ goto ipc_flash_send_rcv;
+
+ ret = ipc_flash_receive_data(ipc_devlink,
+ frame_len - IOSM_EBL_HEAD_SIZE, mdm_rsp);
+
+ipc_flash_send_rcv:
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_set_capabilities - Set modem boot capabilities in flash
+ * @ipc_devlink: Pointer to devlink structure
+ * @mdm_rsp: Pointer to modem response buffer
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink,
+ u8 *mdm_rsp)
+{
+ ipc_devlink->ebl_ctx.ebl_sw_info_version =
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_RSP_SW_INFO_VER];
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_ERASE] = IOSM_CAP_NOT_ENHANCED;
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_CRC] = IOSM_CAP_NOT_ENHANCED;
+
+ if (ipc_devlink->ebl_ctx.m_ebl_resp[EBL_CAPS_FLAG] &
+ IOSM_CAP_USE_EXT_CAP) {
+ if (ipc_devlink->param.erase_full_flash)
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
+ ~((u8)IOSM_EXT_CAP_ERASE_ALL);
+ else
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
+ ~((u8)IOSM_EXT_CAP_COMMIT_ALL);
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_EXT_CAPS_HANDLED] =
+ IOSM_CAP_USE_EXT_CAP;
+ }
+
+ /* Write the EBL capabilities back to the modem via the Set
+ * Protocol Configuration (FLASH_SET_PROT_CONF) command
+ */
+ return ipc_flash_send_receive(ipc_devlink, FLASH_SET_PROT_CONF,
+ ipc_devlink->ebl_ctx.m_ebl_resp,
+ IOSM_EBL_RSP_SIZE, mdm_rsp);
+}
+
+/* Read the SWID type and SWID value from the EBL */
+int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ struct iosm_flash_msg_control cmd_msg;
+ struct iosm_swid_table *swid;
+ char ebl_swid[IOSM_SWID_STR];
+ int ret;
+
+ if (ipc_devlink->ebl_ctx.ebl_sw_info_version !=
+ IOSM_EXT_CAP_SWID_OOS_PACK)
+ return -EINVAL;
+
+ cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_READ);
+ cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_SWID_TABLE);
+ cmd_msg.length = cpu_to_le32(IOSM_MSG_LEN_ARG);
+ cmd_msg.arguments = cpu_to_le32(IOSM_MSG_LEN_ARG);
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
+ (u8 *)&cmd_msg, IOSM_MDM_SEND_16, mdm_rsp);
+ if (ret)
+ goto ipc_swid_err;
+
+ cmd_msg.action = cpu_to_le32(*((u32 *)mdm_rsp));
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_DATA_READ,
+ (u8 *)&cmd_msg, IOSM_MDM_SEND_4, mdm_rsp);
+ if (ret)
+ goto ipc_swid_err;
+
+ swid = (struct iosm_swid_table *)mdm_rsp;
+ dev_dbg(ipc_devlink->dev, "SWID %x RF_ENGINE_ID %x", swid->sw_id_val,
+ swid->rf_engine_id_val);
+
+ snprintf(ebl_swid, sizeof(ebl_swid), "SWID: %x, RF_ENGINE_ID: %x",
+ swid->sw_id_val, swid->rf_engine_id_val);
+
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, ebl_swid,
+ NULL, 0, 0);
+ipc_swid_err:
+ return ret;
+}
+
+/* Function to check if full erase or conditional erase was successful */
+static int ipc_flash_erase_check(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ int ret, count = 0;
+ u16 mdm_rsp_data;
+
+ /* Poll with the Flash Erase Check request until the modem reports
+ * the erase done or the timeout elapses
+ */
+ do {
+ mdm_rsp_data = IOSM_MDM_SEND_DATA;
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_CHECK,
+ (u8 *)&mdm_rsp_data,
+ IOSM_MDM_SEND_2, mdm_rsp);
+ if (ret)
+ goto ipc_erase_chk_err;
+
+ mdm_rsp_data = *((u16 *)mdm_rsp);
+ if (mdm_rsp_data > IOSM_MDM_ERASE_RSP) {
+ dev_err(ipc_devlink->dev,
+ "Flash Erase Check resp wrong 0x%04X",
+ mdm_rsp_data);
+ ret = -EINVAL;
+ goto ipc_erase_chk_err;
+ }
+ count++;
+ msleep(IOSM_FLASH_ERASE_CHECK_INTERVAL);
+ } while ((mdm_rsp_data != IOSM_MDM_ERASE_RSP) &&
+ (count < (IOSM_FLASH_ERASE_CHECK_TIMEOUT /
+ IOSM_FLASH_ERASE_CHECK_INTERVAL)));
+
+ if (mdm_rsp_data != IOSM_MDM_ERASE_RSP) {
+ dev_err(ipc_devlink->dev, "Modem erase check timeout failure!");
+ ret = -ETIMEDOUT;
+ }
+
+ipc_erase_chk_err:
+ return ret;
+}
+
+/* Full erase function which will erase the nand flash through EBL command */
+static int ipc_flash_full_erase(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ u32 erase_address = IOSM_ERASE_START_ADDR;
+ struct iosm_flash_msg_control cmd_msg;
+ u32 erase_length = IOSM_ERASE_LEN;
+ int ret;
+
+ dev_dbg(ipc_devlink->dev, "Erase full nand flash");
+ cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_ERASE);
+ cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_ALL_FLASH);
+ cmd_msg.length = cpu_to_le32(erase_length);
+ cmd_msg.arguments = cpu_to_le32(erase_address);
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
+ (unsigned char *)&cmd_msg,
+ IOSM_MDM_SEND_16, mdm_rsp);
+ if (ret)
+ goto ipc_flash_erase_err;
+
+ ipc_devlink->param.erase_full_flash_done = IOSM_SET_FLAG;
+ ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
+
+ipc_flash_erase_err:
+ return ret;
+}
+
+/* Flash all the load regions (Loadmaps) contained in an individual fls file */
+static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp)
+{
+ u32 raw_len, rest_len = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
+ __le32 reg_info[2]; /* [0]: region address, [1]: region end address */
+ u32 nand_address;
+ char *file_ptr;
+ int ret;
+
+ fls_data = (struct iosm_devlink_image *)fw->data;
+ file_ptr = (void *)(fls_data + 1);
+ nand_address = le32_to_cpu(fls_data->region_address);
+ reg_info[0] = cpu_to_le32(nand_address);
+
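+ /* Unless a full-flash erase already ran, erase just the region
+ * about to be written and wait for the erase to complete
+ */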
+ if (!ipc_devlink->param.erase_full_flash_done) {
+ reg_info[1] = cpu_to_le32(nand_address + rest_len - 2);
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_START,
+ (u8 *)reg_info, IOSM_MDM_SEND_8,
+ mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+
+ ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+ }
+
+ /* Request Flash Set Address */
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SET_ADDRESS,
+ (u8 *)reg_info, IOSM_MDM_SEND_4, mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+
+ /* Request Flash Write Raw Image */
+ ret = ipc_flash_send_data(ipc_devlink, IOSM_EBL_DW_PACK_SIZE,
+ FLASH_WRITE_IMAGE_RAW, (u8 *)&rest_len,
+ IOSM_MDM_SEND_4);
+ if (ret)
+ goto dl_region_fail;
+
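+ /* Stream the image in IOSM_FLS_BUF_SIZE chunks and collect the
+ * modem's response once the last chunk has been written
+ */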
+ do {
+ raw_len = (rest_len > IOSM_FLS_BUF_SIZE) ? IOSM_FLS_BUF_SIZE :
+ rest_len;
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, file_ptr,
+ raw_len);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "Image write failed");
+ goto dl_region_fail;
+ }
+ file_ptr += raw_len;
+ rest_len -= raw_len;
+ } while (rest_len);
+
+ ret = ipc_flash_receive_data(ipc_devlink, IOSM_EBL_DW_PAYL_SIZE,
+ mdm_rsp);
+
+dl_region_fail:
+ return ret;
+}
+
+/**
+ * ipc_flash_send_fls - Inject Modem subsystem fls file to device
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ * @mdm_rsp: Pointer to modem response buffer
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp)
+{
+ u32 fw_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
+ u16 flash_cmd;
+ int ret;
+
+ fls_data = (struct iosm_devlink_image *)fw->data;
+ if (ipc_devlink->param.erase_full_flash) {
+ ipc_devlink->param.erase_full_flash = false;
+ ret = ipc_flash_full_erase(ipc_devlink, mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+ }
+
+ /* Request Sec Start */
+ if (!fls_data->download_region) {
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_START,
+ (u8 *)fw->data +
+ IOSM_DEVLINK_HDR_SIZE, fw_size,
+ mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+ } else {
+ /* Download regions */
+ ret = ipc_flash_download_region(ipc_devlink, fw, mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+
+ if (fls_data->last_region) {
+ /* Request Sec End */
+ flash_cmd = IOSM_MDM_SEND_DATA;
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_END,
+ (u8 *)&flash_cmd,
+ IOSM_MDM_SEND_2, mdm_rsp);
+ }
+ }
+
+ipc_flash_err:
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_psi - Inject PSI image
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw)
+{
+ u32 bytes_read, psi_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ u8 psi_ack_byte[IOSM_PSI_ACK], read_data[2];
+ u8 *psi_code;
+ int ret;
+
+ dev_dbg(ipc_devlink->dev, "Boot transfer PSI");
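+ /* Copy the PSI image out of the firmware blob into a kmalloc
+ * buffer that the devlink write path can map for DMA
+ */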
+ psi_code = kmemdup(fw->data + IOSM_DEVLINK_HDR_SIZE, psi_size,
+ GFP_KERNEL);
+ if (!psi_code)
+ return -ENOMEM;
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, psi_size);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "RPSI Image write failed");
+ goto ipc_flash_psi_free;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data,
+ IOSM_LER_ACK_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "ipc_devlink_sio_read ACK failed");
+ goto ipc_flash_psi_free;
+ }
+
+ if (bytes_read != IOSM_LER_ACK_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_psi_free;
+ }
+
+ snprintf(psi_ack_byte, sizeof(psi_ack_byte), "%x%x", read_data[0],
+ read_data[1]);
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
+ psi_ack_byte, "PSI ACK", 0, 0);
+
+ if (read_data[0] == 0x00 && read_data[1] == 0xCD) {
+ dev_dbg(ipc_devlink->dev, "Coredump detected");
+ ret = ipc_coredump_get_list(ipc_devlink,
+ rpsi_cmd_coredump_start);
+ if (ret)
+ dev_err(ipc_devlink->dev, "Failed to get cd list");
+ }
+
+ipc_flash_psi_free:
+ kfree(psi_code);
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_ebl - Inject EBL image
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw)
+{
+ u32 ebl_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ u8 read_data[2];
+ u32 bytes_read;
+ int ret;
+
+ if (ipc_mmio_get_exec_stage(ipc_devlink->pcie->imem->mmio) !=
+ IPC_MEM_EXEC_STAGE_PSI) {
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
+ "Invalid execution stage",
+ NULL, 0, 0);
+ return -EINVAL;
+ }
+
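+ /* EBL handshake: send the load command, read the ack, send the
+ * image length, read the ack, send the image itself, read the
+ * final ack, then fetch the EBL response block
+ */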
+ dev_dbg(ipc_devlink->dev, "Boot transfer EBL");
+ ret = ipc_devlink_send_cmd(ipc_devlink, rpsi_cmd_code_ebl,
+ IOSM_RPSI_LOAD_SIZE);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "Sending rpsi_cmd_code_ebl failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "rpsi_cmd_code_ebl read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&ebl_size,
+ sizeof(ebl_size));
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL length write failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink,
+ (u8 *)fw->data + IOSM_DEVLINK_HDR_SIZE,
+ ebl_size);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL data transfer failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink,
+ ipc_devlink->ebl_ctx.m_ebl_resp,
+ IOSM_EBL_RSP_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL response read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_EBL_RSP_SIZE)
+ ret = -EINVAL;
+
+ipc_flash_ebl_err:
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.h b/drivers/net/wwan/iosm/iosm_ipc_flash.h
new file mode 100644
index 000000000000..132d59d60fbe
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_flash.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_FLASH_H
+#define _IOSM_IPC_FLASH_H
+
+/* Buffer size used to read the fls image */
+#define IOSM_FLS_BUF_SIZE 0x00100000
+/* Full erase start address */
+#define IOSM_ERASE_START_ADDR 0x00000000
+/* Erase length for NAND flash */
+#define IOSM_ERASE_LEN 0xFFFFFFFF
+/* EBL response Header size */
+#define IOSM_EBL_HEAD_SIZE 8
+/* EBL payload size (write pack) */
+#define IOSM_EBL_W_PAYL_SIZE 2048
+/* Total EBL write pack size */
+#define IOSM_EBL_W_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_W_PAYL_SIZE)
+/* EBL payload size (download pack) */
+#define IOSM_EBL_DW_PAYL_SIZE 16384
+/* Total EBL download pack size */
+#define IOSM_EBL_DW_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_DW_PAYL_SIZE)
+/* EBL name size */
+#define IOSM_EBL_NAME 32
+/* Maximum supported error types */
+#define IOSM_MAX_ERRORS 8
+/* Read size for RPSI/EBL response */
+#define IOSM_READ_SIZE 2
+/* Link establishment response ack size */
+#define IOSM_LER_ACK_SIZE 2
+/* PSI ACK len */
+#define IOSM_PSI_ACK 8
+/* SWID capability for packed swid type */
+#define IOSM_EXT_CAP_SWID_OOS_PACK 0x02
+/* EBL error response buffer */
+#define IOSM_EBL_RSP_BUFF 0x0041
+/* SWID string length */
+#define IOSM_SWID_STR 64
+/* Load EBL command size */
+#define IOSM_RPSI_LOAD_SIZE 0
+/* Mask for the low half of the payload length used in the checksum */
+#define IOSM_EBL_CKSM 0x0000FFFF
+/* SWID msg len and argument */
+#define IOSM_MSG_LEN_ARG 0
+/* Data to be sent to modem */
+#define IOSM_MDM_SEND_DATA 0x0000
+/* Data received from modem as part of erase check */
+#define IOSM_MDM_ERASE_RSP 0x0001
+/* Bit shift to calculate Checksum */
+#define IOSM_EBL_PAYL_SHIFT 16
+/* Flag to be set */
+#define IOSM_SET_FLAG 1
+/* Set flash erase check timeout to 100 msec */
+#define IOSM_FLASH_ERASE_CHECK_TIMEOUT 100
+/* Set flash erase check interval to 20 msec */
+#define IOSM_FLASH_ERASE_CHECK_INTERVAL 20
+/* Link establishment response size */
+#define IOSM_LER_RSP_SIZE 60
+
+/**
+ * enum iosm_flash_package_type - Enum for the flashing operations
+ * @FLASH_SET_PROT_CONF: Write EBL capabilities
+ * @FLASH_SEC_START: Start writing the secpack
+ * @FLASH_SEC_END: Validate secpack end
+ * @FLASH_SET_ADDRESS: Set the address for flashing
+ * @FLASH_ERASE_START: Start erase before flashing
+ * @FLASH_ERASE_CHECK: Validate the erase functionality
+ * @FLASH_OOS_CONTROL: Retrieve data based on oos actions
+ * @FLASH_OOS_DATA_READ: Read data from EBL
+ * @FLASH_WRITE_IMAGE_RAW: Write the raw image to flash
+ */
+enum iosm_flash_package_type {
+ FLASH_SET_PROT_CONF = 0x0086,
+ FLASH_SEC_START = 0x0204,
+ FLASH_SEC_END,
+ FLASH_SET_ADDRESS = 0x0802,
+ FLASH_ERASE_START = 0x0805,
+ FLASH_ERASE_CHECK,
+ FLASH_OOS_CONTROL = 0x080C,
+ FLASH_OOS_DATA_READ = 0x080E,
+ FLASH_WRITE_IMAGE_RAW,
+};
+
+/**
+ * enum iosm_out_of_session_action - Actions possible over the
+ * OutOfSession command interface
+ * @FLASH_OOSC_ACTION_READ: Read data according to its type
+ * @FLASH_OOSC_ACTION_ERASE: Erase data according to its type
+ */
+enum iosm_out_of_session_action {
+ FLASH_OOSC_ACTION_READ = 2,
+ FLASH_OOSC_ACTION_ERASE = 3,
+};
+
+/**
+ * enum iosm_out_of_session_type - Data types that can be handled over the
+ * Out Of Session command Interface
+ * @FLASH_OOSC_TYPE_ALL_FLASH: The whole flash area
+ * @FLASH_OOSC_TYPE_SWID_TABLE: Read the swid table from the target
+ */
+enum iosm_out_of_session_type {
+ FLASH_OOSC_TYPE_ALL_FLASH = 8,
+ FLASH_OOSC_TYPE_SWID_TABLE = 16,
+};
+
+/**
+ * enum iosm_ebl_caps - EBL capability settings
+ * @IOSM_CAP_NOT_ENHANCED: If capability not supported
+ * @IOSM_CAP_USE_EXT_CAP: To be set if extended capability is set
+ * @IOSM_EXT_CAP_ERASE_ALL: Set Erase all capability
+ * @IOSM_EXT_CAP_COMMIT_ALL: Set the commit all capability
+ */
+enum iosm_ebl_caps {
+ IOSM_CAP_NOT_ENHANCED = 0x00,
+ IOSM_CAP_USE_EXT_CAP = 0x01,
+ IOSM_EXT_CAP_ERASE_ALL = 0x08,
+ IOSM_EXT_CAP_COMMIT_ALL = 0x20,
+};
+
+/**
+ * enum iosm_ebl_rsp - EBL response field
+ * @EBL_CAPS_FLAG: EBL capability flag
+ * @EBL_SKIP_ERASE: EBL skip erase flag
+ * @EBL_SKIP_CRC: EBL skip wr_pack crc
+ * @EBL_EXT_CAPS_HANDLED: EBL extended capability handled flag
+ * @EBL_OOS_CONFIG: EBL oos configuration
+ * @EBL_RSP_SW_INFO_VER: EBL SW info version
+ */
+enum iosm_ebl_rsp {
+ EBL_CAPS_FLAG = 50,
+ EBL_SKIP_ERASE = 54,
+ EBL_SKIP_CRC = 55,
+ EBL_EXT_CAPS_HANDLED = 57,
+ EBL_OOS_CONFIG = 64,
+ EBL_RSP_SW_INFO_VER = 70,
+};
+
+/**
+ * enum iosm_mdm_send_recv_data - Data to send to modem
+ * @IOSM_MDM_SEND_2: Send 2 bytes of payload
+ * @IOSM_MDM_SEND_4: Send 4 bytes of payload
+ * @IOSM_MDM_SEND_8: Send 8 bytes of payload
+ * @IOSM_MDM_SEND_16: Send 16 bytes of payload
+ */
+enum iosm_mdm_send_recv_data {
+ IOSM_MDM_SEND_2 = 2,
+ IOSM_MDM_SEND_4 = 4,
+ IOSM_MDM_SEND_8 = 8,
+ IOSM_MDM_SEND_16 = 16,
+};
+
+/**
+ * struct iosm_ebl_one_error - Structure containing error details
+ * @error_class: Error type - standard, security or text error
+ * @error_code: Specific error from error type
+ */
+struct iosm_ebl_one_error {
+ u16 error_class;
+ u16 error_code;
+};
+
+/**
+ * struct iosm_ebl_error - Structure with the max number of errors supported
+ * @error: Array of one_error structure with max errors
+ */
+struct iosm_ebl_error {
+ struct iosm_ebl_one_error error[IOSM_MAX_ERRORS];
+};
+
+/**
+ * struct iosm_swid_table - SWID table data for modem
+ * @number_of_data_sets: Number of swid types
+ * @sw_id_type: SWID type - SWID
+ * @sw_id_val: SWID value
+ * @rf_engine_id_type: RF engine ID type - RF_ENGINE_ID
+ * @rf_engine_id_val: RF engine ID value
+ */
+struct iosm_swid_table {
+ u32 number_of_data_sets;
+ char sw_id_type[IOSM_EBL_NAME];
+ u32 sw_id_val;
+ char rf_engine_id_type[IOSM_EBL_NAME];
+ u32 rf_engine_id_val;
+};
+
+/**
+ * struct iosm_flash_msg_control - Data sent to modem
+ * @action: Action to be performed
+ * @type: Type of action
+ * @length: Length of the action
+ * @arguments: Argument value sent to modem
+ */
+struct iosm_flash_msg_control {
+ __le32 action;
+ __le32 type;
+ __le32 length;
+ __le32 arguments;
+};
+
+/**
+ * struct iosm_flash_data - Header Data to be sent to modem
+ * @checksum: Checksum value calculated for the payload data
+ * @pack_id: Flash Action type
+ * @msg_length: Payload length
+ */
+struct iosm_flash_data {
+ __le16 checksum;
+ __le16 pack_id;
+ __le32 msg_length;
+};
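+
+/* On the wire each pack is this 8-byte header followed by msg_length
+ * payload bytes; the *_PACK_SIZE defines above bound the total size
+ */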
+
+int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw);
+
+int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw);
+
+int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink,
+ u8 *mdm_rsp);
+
+int ipc_flash_link_establish(struct iosm_imem *ipc_imem);
+
+int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp);
+
+int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 9f00e36b7f79..cff3b43ca4d7 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
@@ -263,9 +265,12 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
switch (pipe->channel->ctype) {
case IPC_CTYPE_CTRL:
port_id = pipe->channel->channel_id;
-
- /* Pass the packet to the wwan layer. */
- wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb);
+ if (port_id == IPC_MEM_CTRL_CHL_ID_7)
+ ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
+ skb);
+ else
+ wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
+ skb);
break;
case IPC_CTYPE_WWAN:
@@ -399,19 +404,8 @@ static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
- if (ipc_imem->flash_channel_id < 0) {
- ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL;
- dev_err(ipc_imem->dev, "Missing flash app:%d",
- ipc_imem->flash_channel_id);
- return;
- }
-
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
-
- /* Wake up the flash app to continue or to terminate depending
- * on the CP ROM exit code.
- */
- channel = &ipc_imem->channels[ipc_imem->flash_channel_id];
complete(&channel->ul_sem);
}
@@ -482,8 +476,8 @@ static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
container_of(hr_timer, struct iosm_imem, startup_timer);
if (ktime_to_ns(ipc_imem->hrtimer_period)) {
- hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
- ipc_imem->hrtimer_period);
+ hrtimer_forward_now(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period);
result = HRTIMER_RESTART;
}
@@ -572,7 +566,7 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
enum ipc_phase old_phase, phase;
bool retry_allocation = false;
bool ul_pending = false;
- int ch_id, i;
+ int i;
if (irq != IMEM_IRQ_DONT_CARE)
ipc_imem->ev_irq_pending[irq] = false;
@@ -696,11 +690,8 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
- IPC_MEM_DEVICE_IPC_RUNNING &&
- ipc_imem->flash_channel_id >= 0) {
- /* Wake up the flash app to open the pipes. */
- ch_id = ipc_imem->flash_channel_id;
- complete(&ipc_imem->channels[ch_id].ul_sem);
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
}
/* Reset the expected CP state. */
@@ -1176,6 +1167,9 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
ipc_port_deinit(ipc_imem->ipc_port);
}
+ if (ipc_imem->ipc_devlink)
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
ipc_imem_device_ipc_uninit(ipc_imem);
ipc_imem_channel_reset(ipc_imem);
@@ -1258,6 +1252,7 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
void __iomem *mmio, struct device *dev)
{
struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
+ enum ipc_mem_exec_stage stage;
if (!ipc_imem)
return NULL;
@@ -1272,9 +1267,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
ipc_imem->cp_version = 0;
ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
- /* Reset the flash channel id. */
- ipc_imem->flash_channel_id = -1;
-
/* Reset the max number of configured channels */
ipc_imem->nr_of_channels = 0;
@@ -1328,8 +1320,21 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
goto imem_config_fail;
}
- return ipc_imem;
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
+ /* Alloc and Register devlink */
+ ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
+ if (!ipc_imem->ipc_devlink) {
+ dev_err(ipc_imem->dev, "Devlink register failed");
+ goto imem_config_fail;
+ }
+ if (ipc_flash_link_establish(ipc_imem))
+ goto devlink_channel_fail;
+ }
+ return ipc_imem;
+devlink_channel_fail:
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
hrtimer_cancel(&ipc_imem->td_alloc_timer);
hrtimer_cancel(&ipc_imem->fast_update_timer);
@@ -1361,3 +1366,51 @@ void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
ipc_imem->td_update_timer_suspended = suspend;
}
+
+/* Verify the CP execution state, copy the chip info,
+ * change the execution phase to ROM
+ */
+static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
+ int arg, void *msg,
+ size_t msgsize)
+{
+ enum ipc_mem_exec_stage stage;
+ struct sk_buff *skb;
+ int rc = -EINVAL;
+ size_t size;
+
+ /* Test the CP execution state. */
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
+ dev_err(ipc_imem->dev,
+ "Execution_stage: expected BOOT, received = %X", stage);
+ goto trigger_chip_info_fail;
+ }
+ /* Allocate a new sk buf for the chip info. */
+ size = ipc_imem->mmio->chip_info_size;
+ if (size > IOSM_CHIP_INFO_SIZE_MAX)
+ goto trigger_chip_info_fail;
+
+ skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
+ if (!skb) {
+ dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
+ rc = -ENOMEM;
+ goto trigger_chip_info_fail;
+ }
+ /* Copy the chip info characters into the ipc_skb. */
+ ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
+ /* First change to the ROM boot phase. */
+ dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
+ ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
+ ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
+ rc = 0;
+trigger_chip_info_fail:
+ return rc;
+}
+
+int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
+{
+ return ipc_task_queue_send_task(ipc_imem,
+ ipc_imem_devlink_trigger_chip_info_cb,
+ 0, NULL, 0, true);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index dc65b0712261..6be6708b4eec 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -69,7 +69,7 @@ struct ipc_chnl_cfg;
#define IMEM_IRQ_DONT_CARE (-1)
-#define IPC_MEM_MAX_CHANNELS 7
+#define IPC_MEM_MAX_CHANNELS 8
#define IPC_MEM_MUX_IP_SESSION_ENTRIES 8
@@ -98,6 +98,7 @@ struct ipc_chnl_cfg;
#define IPC_MEM_DL_ETH_OFFSET 16
#define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb))
+#define IOSM_CHIP_INFO_SIZE_MAX 100
#define FULLY_FUNCTIONAL 0
@@ -304,9 +305,9 @@ enum ipc_phase {
* @ipc_port: IPC PORT data structure pointer
* @pcie: IPC PCIe
* @dev: Pointer to device structure
- * @flash_channel_id: Reserved channel id for flashing to RAM.
* @ipc_requested_state: Expected IPC state on CP.
* @channels: Channel list with UL/DL pipe pairs.
+ * @ipc_devlink: IPC Devlink data structure pointer
* @ipc_status: local ipc_status
* @nr_of_channels: number of configured channels
* @startup_timer: startup timer for NAND support.
@@ -349,9 +350,9 @@ struct iosm_imem {
struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS];
struct iosm_pcie *pcie;
struct device *dev;
- int flash_channel_id;
enum ipc_mem_device_ipc_state ipc_requested_state;
struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS];
+ struct iosm_devlink *ipc_devlink;
u32 ipc_status;
u32 nr_of_channels;
struct hrtimer startup_timer;
@@ -575,4 +576,15 @@ void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem);
*/
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+
+/**
+ * ipc_imem_devlink_trigger_chip_info - Inform devlink that the chip
+ * information is available when flashing to RAM is to be executed.
+ * @ipc_imem: Pointer to imem structure
+ *
+ * Returns: 0 on success, -1 on failure
+ */
+int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 0a472ce77370..b885a6570235 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_devlink.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
@@ -331,3 +332,319 @@ int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
out:
return ret;
}
+
+/* Open a SIO link to CP and return the channel instance */
+struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+ enum ipc_phase phase;
+ int channel_id;
+
+ phase = ipc_imem_phase_update(ipc_imem);
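+ /* The devlink channel is created afresh while CP is in OFF/ROM and
+ * reopened once PSI/EBL is running; any other phase refuses the
+ * open
+ */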
+ switch (phase) {
+ case IPC_P_OFF:
+ case IPC_P_ROM:
+ /* Get a channel id as flash id and reserve it. */
+ channel_id = ipc_imem_channel_alloc(ipc_imem,
+ IPC_MEM_CTRL_CHL_ID_7,
+ IPC_CTYPE_CTRL);
+
+ if (channel_id < 0) {
+ dev_err(ipc_imem->dev,
+ "reservation of a flash channel id failed");
+ goto error;
+ }
+
+ ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
+ channel = &ipc_imem->channels[channel_id];
+
+ /* Enqueue chip info data to be read */
+ if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
+ dev_err(ipc_imem->dev, "Enqueue of chip info failed");
+ channel->state = IMEM_CHANNEL_FREE;
+ goto error;
+ }
+
+ return channel;
+
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
+ if (ipc_imem->cp_version == -1) {
+ dev_err(ipc_imem->dev, "invalid CP version");
+ goto error;
+ }
+
+ channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
+ return ipc_imem_channel_open(ipc_imem, channel_id,
+ IPC_HP_CDEV_OPEN);
+
+ default:
+ /* CP is in the wrong state (e.g. CRASH or CD_READY) */
+ dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
+ }
+error:
+ return NULL;
+}
+
+/* Release a SIO channel link to CP. */
+void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
+{
+ struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
+ int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
+ enum ipc_mem_exec_stage exec_stage;
+ struct ipc_mem_channel *channel;
+ enum ipc_phase curr_phase;
+ int status = 0;
+ u32 tail = 0;
+
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
+ curr_phase = ipc_imem->phase;
+ /* Poll the execution stage for up to boot_check_timeout ms until
+ * CP reaches the RUN or PSI stage
+ */
+ do {
+ exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
+ exec_stage == IPC_MEM_EXEC_STAGE_PSI)
+ break;
+ msleep(20);
+ boot_check_timeout -= 20;
+ } while (boot_check_timeout > 0);
+
+ /* If there are any pending TDs then wait for Timeout/Completion before
+ * closing pipe.
+ */
+ if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->ul_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
+ channel->ul_pipe.pipe_nr,
+ channel->ul_pipe.old_head,
+ channel->ul_pipe.old_tail);
+ }
+ }
+
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
+ &channel->dl_pipe, NULL, &tail);
+
+ if (tail != channel->dl_pipe.old_tail) {
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->dl_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
+ channel->dl_pipe.pipe_nr,
+ channel->dl_pipe.old_head,
+ channel->dl_pipe.old_tail);
+ }
+ }
+
+ /* Due to the wait for completion in messages, there is a small window
+ * between closing the pipe and marking the channel closed. In this
+ * small window there could be an HP update from the host driver.
+ * Hence update the channel state to CLOSING to avoid an unnecessary
+ * interrupt towards CP.
+ */
+ channel->state = IMEM_CHANNEL_CLOSING;
+ /* Release the pipe resources */
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+}
+
+void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
+ struct sk_buff *skb)
+{
+ skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
+ complete(&ipc_devlink->devlink_sio.read_sem);
+}
+
+/* PSI transfer */
+static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel,
+ unsigned char *buf, int count)
+{
+ int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
+ enum ipc_mem_exec_stage exec_stage;
+
+ dma_addr_t mapping = 0;
+ int ret;
+
+ ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto pcie_addr_map_fail;
+
+ /* Save the PSI information for the CP ROM driver on the doorbell
+ * scratchpad.
+ */
+ ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
+ ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
+
+ ret = wait_for_completion_interruptible_timeout
+ (&channel->ul_sem,
+ msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
+
+ if (ret <= 0) {
+ dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
+ ret);
+ goto psi_transfer_fail;
+ }
+ /* If the PSI download fails, return the CP boot ROM exit code */
+ if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
+ ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
+ ret = (-1) * ((int)ipc_imem->rom_exit_code);
+ goto psi_transfer_fail;
+ }
+
+ dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
+
+ /* Wait psi_start_timeout milliseconds until the CP PSI image is
+ * running and updates the execution_stage field with
+ * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
+ */
+ do {
+ exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+
+ if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
+ break;
+
+ msleep(20);
+ psi_start_timeout -= 20;
+ } while (psi_start_timeout > 0);
+
+ if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
+ goto psi_transfer_fail; /* Unknown status of CP PSI process. */
+
+ ipc_imem->phase = IPC_P_PSI;
+
+ /* Enter the PSI phase. */
+ dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
+
+ /* Request the RUNNING state from CP and wait until it is reached
+ * or the timeout expires.
+ */
+ ipc_imem_ipc_init_check(ipc_imem);
+
+ ret = wait_for_completion_interruptible_timeout
+ (&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
+ if (ret <= 0) {
+ dev_err(ipc_imem->dev,
+ "Failed PSI RUNNING state on CP, Error-%d", ret);
+ goto psi_transfer_fail;
+ }
+
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ dev_err(ipc_imem->dev,
+ "ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
+ channel->channel_id,
+ ipc_imem_phase_get_string(ipc_imem->phase),
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+
+ goto psi_transfer_fail;
+ }
+
+ /* Create the flash channel for the transfer of the images. */
+ if (!ipc_imem_sys_devlink_open(ipc_imem)) {
+ dev_err(ipc_imem->dev, "can't open flash_channel");
+ goto psi_transfer_fail;
+ }
+
+ ret = 0;
+psi_transfer_fail:
+ ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
+pcie_addr_map_fail:
+ return ret;
+}
+
+int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
+ unsigned char *buf, int count)
+{
+ struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
+ struct ipc_mem_channel *channel;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret;
+
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
+
+ /* In the ROM phase the PSI image is passed to CP directly via a
+ * dedicated shared memory area and the doorbell scratchpad.
+ */
+ if (ipc_imem->phase == IPC_P_ROM) {
+ ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
+ /* If the PSI transfer fails after CP acknowledged it, send
+ * the inband crash signature.
+ */
+ if (ret > 0)
+ ipc_imem_msg_send_feature_set(ipc_imem,
+ IPC_MEM_INBAND_CRASH_SIG,
+ false);
+ goto out;
+ }
+
+ /* Allocate skb memory for the uplink buffer. */
+ skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
+ DMA_TO_DEVICE, 0);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb_put(skb, count), buf, count);
+
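+ /* Mark the skb as a blocking user operation so the writer can wait
+ * on ul_sem for CP's confirmation below
+ */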
+ IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
+
+ /* Add skb to the uplink skbuf accumulator. */
+ skb_queue_tail(&channel->ul_list, skb);
+
+ /* Inform the IPC tasklet to pass uplink IP packets to CP. */
+ if (!ipc_imem_call_cdev_write(ipc_imem)) {
+ ret = wait_for_completion_interruptible(&channel->ul_sem);
+
+ if (ret < 0) {
+ dev_err(ipc_imem->dev,
+ "ch[%d] no CP confirmation, status = %d",
+ channel->channel_id, ret);
+ ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
+ goto out;
+ }
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
+ u32 bytes_to_read, u32 *bytes_read)
+{
+ struct sk_buff *skb = NULL;
+ int rc = 0;
+
+ /* Dequeue an skb from rx_list, waiting on read_sem if none is
+ * available yet
+ */
+ devlink->devlink_sio.devlink_read_pend = 1;
+ while (!(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
+ if (!wait_for_completion_interruptible_timeout
+ (&devlink->devlink_sio.read_sem,
+ msecs_to_jiffies(IPC_READ_TIMEOUT))) {
+ dev_err(devlink->dev, "Read timed out");
+ rc = -ETIMEDOUT;
+ goto devlink_read_fail;
+ }
+ }
+ devlink->devlink_sio.devlink_read_pend = 0;
+ if (bytes_to_read < skb->len) {
+ dev_err(devlink->dev, "Invalid size, expected len %d", skb->len);
+ rc = -EINVAL;
+ goto devlink_read_fail;
+ }
+ *bytes_read = skb->len;
+ memcpy(data, skb->data, skb->len);
+
+devlink_read_fail:
+ ipc_pcie_kfree_skb(devlink->pcie, skb);
+ return rc;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index 2007fe23e9a5..f0c88ac5643c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -9,7 +9,7 @@
#include "iosm_ipc_mux_codec.h"
/* Maximum wait time for blocking read */
-#define IPC_READ_TIMEOUT 500
+#define IPC_READ_TIMEOUT 3000
/* The delay in ms for defering the unregister */
#define SIO_UNREGISTER_DEFER_DELAY_MS 1
@@ -98,4 +98,51 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
*/
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
enum ipc_mux_protocol mux_type);
+
+/**
+ * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP
+ * @ipc_imem: iosm_imem instance
+ *
+ * Return: channel instance on success, NULL for failure
+ */
+struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_sys_devlink_close - Release a Flash/CD channel link to CP
+ * @ipc_devlink: Pointer to ipc_devlink data-struct
+ *
+ */
+void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink);
+
+/**
+ * ipc_imem_sys_devlink_notify_rx - Receive downlink characters from CP;
+ * the downlink skbuf is appended to the rx list
+ * @ipc_devlink: Pointer to ipc_devlink data-struct
+ * @skb: Pointer to sk buffer
+ */
+void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
+ struct sk_buff *skb);
+
+/**
+ * ipc_imem_sys_devlink_read - Copy the rx data and free the skbuf
+ * @ipc_devlink: Devlink instance
+ * @data: Buffer to read the data from modem
+ * @bytes_to_read: Size of destination buffer
+ * @bytes_read: Number of bytes read
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data,
+ u32 bytes_to_read, u32 *bytes_read);
+
+/**
+ * ipc_imem_sys_devlink_write - Route the uplink buffer to CP
+ * @ipc_devlink: Devlink instance
+ * @buf: Pointer to buffer
+ * @count: Number of data bytes to write
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
+ unsigned char *buf, int count);
#endif
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 32d5bc4919d8..0f7fd159f0f2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1474,7 +1474,7 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
RING_IDX rsp_prod, req_prod;
- int err = -ENOMEM;
+ int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&tx_ring_ref, 1, &addr);
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 051c43a2a52f..f78670bf41e0 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -335,7 +335,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client)
return r;
}
- dev_dbg(dev, "I2C driver loaded\n");
return 0;
}
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 86f593c73ed6..067295124eb9 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -237,8 +237,6 @@ static int microread_i2c_probe(struct i2c_client *client,
struct microread_i2c_phy *phy;
int r;
- dev_dbg(&client->dev, "client %p\n", client);
-
phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
GFP_KERNEL);
if (!phy)
@@ -262,8 +260,6 @@ static int microread_i2c_probe(struct i2c_client *client,
if (r < 0)
goto err_irq;
- nfc_info(&client->dev, "Probed\n");
-
return 0;
err_irq:
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 8edf761a6b2a..e2a77a5fc887 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -23,13 +23,9 @@ static int microread_mei_probe(struct mei_cl_device *cldev,
struct nfc_mei_phy *phy;
int r;
- pr_info("Probing NFC microread\n");
-
phy = nfc_mei_phy_alloc(cldev);
- if (!phy) {
- pr_err("Cannot allocate memory for microread mei phy.\n");
+ if (!phy)
return -ENOMEM;
- }
r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index edac56b01fd1..e83f65596a88 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -76,10 +76,8 @@ static struct sk_buff *alloc_lc_skb(struct nfcmrvl_private *priv, uint8_t plen)
struct nci_data_hdr *hdr;
skb = nci_skb_alloc(priv->ndev, (NCI_DATA_HDR_SIZE + plen), GFP_KERNEL);
- if (!skb) {
- pr_err("no memory for data\n");
+ if (!skb)
return NULL;
- }
hdr = skb_put(skb, NCI_DATA_HDR_SIZE);
hdr->conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL;
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index e6bf8cfe3aa7..673eb5e9b887 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -128,7 +128,6 @@ static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb)
static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
{
struct pn533_i2c_phy *phy = data;
- struct i2c_client *client;
struct sk_buff *skb = NULL;
int r;
@@ -137,9 +136,6 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->hard_fault != 0)
return IRQ_HANDLED;
@@ -160,7 +156,7 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
return IRQ_HANDLED;
}
-static struct pn533_phy_ops i2c_phy_ops = {
+static const struct pn533_phy_ops i2c_phy_ops = {
.send_frame = pn533_i2c_send_frame,
.send_ack = pn533_i2c_send_ack,
.abort_cmd = pn533_i2c_abort_cmd,
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 2f3f3fe9a0ba..787bcbd290f7 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1235,8 +1235,6 @@ static void pn533_listen_mode_timer(struct timer_list *t)
{
struct pn533 *dev = from_timer(dev, t, listen_timer);
- dev_dbg(dev->dev, "Listen mode timeout\n");
-
dev->cancel_listen = 1;
pn533_poll_next_mod(dev);
@@ -2173,7 +2171,7 @@ void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status)
}
if (skb == NULL) {
- pr_err("NULL Frame -> link is dead\n");
+ dev_err(dev->dev, "NULL Frame -> link is dead\n");
goto sched_wq;
}
@@ -2735,7 +2733,7 @@ EXPORT_SYMBOL_GPL(pn533_finalize_setup);
struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
- struct pn533_phy_ops *phy_ops,
+ const struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
struct device *dev)
{
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
index 5f94f38a2a08..09e35b8693f5 100644
--- a/drivers/nfc/pn533/pn533.h
+++ b/drivers/nfc/pn533/pn533.h
@@ -177,7 +177,7 @@ struct pn533 {
struct device *dev;
void *phy;
- struct pn533_phy_ops *phy_ops;
+ const struct pn533_phy_ops *phy_ops;
};
typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
@@ -232,7 +232,7 @@ struct pn533_phy_ops {
struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
- struct pn533_phy_ops *phy_ops,
+ const struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
struct device *dev);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 7bdaf8263070..2caf997f9bc9 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -123,7 +123,7 @@ static int pn532_dev_down(struct pn533 *dev)
return 0;
}
-static struct pn533_phy_ops uart_phy_ops = {
+static const struct pn533_phy_ops uart_phy_ops = {
.send_frame = pn532_uart_send_frame,
.send_ack = pn532_uart_send_ack,
.abort_cmd = pn532_uart_abort_cmd,
@@ -224,7 +224,7 @@ static int pn532_receive_buf(struct serdev_device *serdev,
return i;
}
-static struct serdev_device_ops pn532_serdev_ops = {
+static const struct serdev_device_ops pn532_serdev_ops = {
.receive_buf = pn532_receive_buf,
.write_wakeup = serdev_device_write_wakeup,
};
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index bd7f7478d189..6f71ac72012e 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -429,7 +429,7 @@ static void pn533_send_complete(struct urb *urb)
}
}
-static struct pn533_phy_ops usb_phy_ops = {
+static const struct pn533_phy_ops usb_phy_ops = {
.send_frame = pn533_usb_send_frame,
.send_ack = pn533_usb_send_ack,
.abort_cmd = pn533_usb_abort_cmd,
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 5c10aac085a4..c493f2dbd0e2 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -22,13 +22,9 @@ static int pn544_mei_probe(struct mei_cl_device *cldev,
struct nfc_mei_phy *phy;
int r;
- pr_info("Probing NFC pn544\n");
-
phy = nfc_mei_phy_alloc(cldev);
- if (!phy) {
- pr_err("Cannot allocate memory for pn544 mei phy.\n");
+ if (!phy)
return -ENOMEM;
- }
r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
@@ -46,8 +42,6 @@ static void pn544_mei_remove(struct mei_cl_device *cldev)
{
struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
- pr_info("Removing pn544\n");
-
pn544_hci_remove(phy->hdev);
nfc_mei_phy_free(phy);
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 1af7a1e632cf..c20fdbac51c5 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -357,6 +357,7 @@ s3fwrn5_fw_is_custom(const struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
{
+ struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo;
int ret;
@@ -364,8 +365,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Failed to get bootinfo, ret=%02x\n", ret);
+ dev_err(dev, "Failed to get bootinfo, ret=%02x\n", ret);
goto err;
}
@@ -373,8 +373,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unknown hardware version\n");
+ dev_err(dev, "Unknown hardware version\n");
goto err;
}
@@ -409,6 +408,7 @@ bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version
int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
{
+ struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_image *fw = &fw_info->fw;
u8 hash_data[SHA1_DIGEST_SIZE];
struct crypto_shash *tfm;
@@ -421,8 +421,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot allocate shash (code=%pe)\n", tfm);
+ dev_err(dev, "Cannot allocate shash (code=%pe)\n", tfm);
return PTR_ERR(tfm);
}
@@ -430,21 +429,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
crypto_free_shash(tfm);
if (ret) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot compute hash (code=%d)\n", ret);
+ dev_err(dev, "Cannot compute hash (code=%d)\n", ret);
return ret;
}
/* Firmware update process */
- dev_info(&fw_info->ndev->nfc_dev->dev,
- "Firmware update: %s\n", fw_info->fw_name);
+ dev_info(dev, "Firmware update: %s\n", fw_info->fw_name);
ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data,
SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unable to enter update mode\n");
+ dev_err(dev, "Unable to enter update mode\n");
return ret;
}
@@ -452,21 +448,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_update_sector(fw_info,
fw_info->base_addr + off, fw->image + off);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Firmware update error (code=%d)\n", ret);
+ dev_err(dev, "Firmware update error (code=%d)\n", ret);
return ret;
}
}
ret = s3fwrn5_fw_complete_update_mode(fw_info);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unable to complete update mode\n");
+ dev_err(dev, "Unable to complete update mode\n");
return ret;
}
- dev_info(&fw_info->ndev->nfc_dev->dev,
- "Firmware update: success\n");
+ dev_info(dev, "Firmware update: success\n");
return ret;
}
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index e374e670b36b..ca6828f55ba0 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -47,6 +47,7 @@ const struct nci_driver_ops s3fwrn5_nci_prop_ops[4] = {
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
{
+ struct device *dev = &info->ndev->nfc_dev->dev;
const struct firmware *fw;
struct nci_prop_fw_cfg_cmd fw_cfg;
struct nci_prop_set_rfreg_cmd set_rfreg;
@@ -55,7 +56,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
int i, len;
int ret;
- ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev);
+ ret = request_firmware(&fw, fw_name, dev);
if (ret < 0)
return ret;
@@ -77,13 +78,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
/* Start rfreg configuration */
- dev_info(&info->ndev->nfc_dev->dev,
- "rfreg configuration update: %s\n", fw_name);
+ dev_info(dev, "rfreg configuration update: %s\n", fw_name);
ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "Unable to start rfreg update\n");
+ dev_err(dev, "Unable to start rfreg update\n");
goto out;
}
@@ -97,8 +96,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG,
len+1, (__u8 *)&set_rfreg);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "rfreg update error (code=%d)\n", ret);
+ dev_err(dev, "rfreg update error (code=%d)\n", ret);
goto out;
}
set_rfreg.index++;
@@ -110,13 +108,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG,
sizeof(stop_rfreg), (__u8 *)&stop_rfreg);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "Unable to stop rfreg update\n");
+ dev_err(dev, "Unable to stop rfreg update\n");
goto out;
}
- dev_info(&info->ndev->nfc_dev->dev,
- "rfreg configuration update: success\n");
+ dev_info(dev, "rfreg configuration update: success\n");
out:
release_firmware(fw);
return ret;
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index ccf6152ebb9f..cbd968f013c7 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -157,7 +157,6 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
struct st_nci_i2c_phy *phy = phy_id;
- struct i2c_client *client;
struct sk_buff *skb = NULL;
int r;
@@ -166,9 +165,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->ndlc->hard_fault)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index e9dc313b333e..755460a73c0d 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -239,8 +239,6 @@ static void ndlc_t1_timeout(struct timer_list *t)
{
struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
- pr_debug("\n");
-
schedule_work(&ndlc->sm_work);
}
@@ -248,8 +246,6 @@ static void ndlc_t2_timeout(struct timer_list *t)
{
struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
- pr_debug("\n");
-
schedule_work(&ndlc->sm_work);
}
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 5fd89f72969d..7764b1a4c3cf 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -638,8 +638,6 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
{
struct st_nci_info *info = nci_get_drvdata(ndev);
- pr_debug("\n");
-
switch (se_idx) {
case ST_NCI_ESE_HOST_ID:
info->se_info.cb = cb;
@@ -671,8 +669,6 @@ static void st_nci_se_wt_timeout(struct timer_list *t)
u8 param = 0x01;
struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
- pr_debug("\n");
-
info->se_info.bwi_active = false;
if (!info->se_info.xch_error) {
@@ -692,8 +688,6 @@ static void st_nci_se_activation_timeout(struct timer_list *t)
struct st_nci_info *info = from_timer(info, t,
se_info.se_active_timer);
- pr_debug("\n");
-
info->se_info.se_active = false;
complete(&info->se_info.req_completion);
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 0875b773fb41..4e723992e74c 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -169,7 +169,6 @@ static int st_nci_spi_read(struct st_nci_spi_phy *phy,
static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
struct st_nci_spi_phy *phy = phy_id;
- struct spi_device *dev;
struct sk_buff *skb = NULL;
int r;
@@ -178,9 +177,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- dev = phy->spi_dev;
- dev_dbg(&dev->dev, "IRQ\n");
-
if (phy->ndlc->hard_fault)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 279d88128b2e..f126ce96a7df 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -421,7 +421,6 @@ static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy,
static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
{
struct st21nfca_i2c_phy *phy = phy_id;
- struct i2c_client *client;
int r;
@@ -430,9 +429,6 @@ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->hard_fault != 0)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index c8bdf078d111..a43fc4117fa5 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -257,8 +257,6 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
struct st21nfca_hci_info *info = from_timer(info, t,
se_info.bwi_timer);
- pr_debug("\n");
-
info->se_info.bwi_active = false;
if (!info->se_info.xch_error) {
@@ -278,8 +276,6 @@ static void st21nfca_se_activation_timeout(struct timer_list *t)
struct st21nfca_hci_info *info = from_timer(info, t,
se_info.se_active_timer);
- pr_debug("\n");
-
info->se_info.se_active = false;
complete(&info->se_info.req_completion);
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 8890fcd59c39..29ca9c328df2 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2170,8 +2170,6 @@ static int trf7970a_suspend(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct trf7970a *trf = spi_get_drvdata(spi);
- dev_dbg(dev, "Suspend\n");
-
mutex_lock(&trf->lock);
trf7970a_shutdown(trf);
@@ -2187,8 +2185,6 @@ static int trf7970a_resume(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Resume\n");
-
mutex_lock(&trf->lock);
ret = trf7970a_startup(trf);
@@ -2206,8 +2202,6 @@ static int trf7970a_pm_runtime_suspend(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Runtime suspend\n");
-
mutex_lock(&trf->lock);
ret = trf7970a_power_down(trf);
@@ -2223,8 +2217,6 @@ static int trf7970a_pm_runtime_resume(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Runtime resume\n");
-
ret = trf7970a_power_up(trf);
if (!ret)
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 3dfeae8912df..80b5fd44ab1c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -70,10 +70,6 @@ config OF_IRQ
def_bool y
depends on !SPARC && IRQ_DOMAIN
-config OF_NET
- depends on NETDEVICES
- def_bool y
-
config OF_RESERVED_MEM
def_bool OF_EARLY_FLATTREE
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index c13b982084a3..e0360a44306e 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
-obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_UNITTEST) += unittest.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
deleted file mode 100644
index dbac3a172a11..000000000000
--- a/drivers/of/of_net.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * OF helpers for network devices.
- *
- * Initially copied out of arch/powerpc/kernel/prom_parse.c
- */
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/phy.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/nvmem-consumer.h>
-
-/**
- * of_get_phy_mode - Get phy mode for given device_node
- * @np: Pointer to the given device_node
- * @interface: Pointer to the result
- *
- * The function gets the phy interface string from the 'phy-mode' or
- * 'phy-connection-type' property. On success the index into the phy_modes
- * table is stored in @interface and 0 is returned. On error, @interface is
- * set to PHY_INTERFACE_MODE_NA and an errno such as -ENODEV is returned.
- */
-int of_get_phy_mode(struct device_node *np, phy_interface_t *interface)
-{
- const char *pm;
- int err, i;
-
- *interface = PHY_INTERFACE_MODE_NA;
-
- err = of_property_read_string(np, "phy-mode", &pm);
- if (err < 0)
- err = of_property_read_string(np, "phy-connection-type", &pm);
- if (err < 0)
- return err;
-
- for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
- if (!strcasecmp(pm, phy_modes(i))) {
- *interface = i;
- return 0;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(of_get_phy_mode);
-
-static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr)
-{
- struct property *pp = of_find_property(np, name, NULL);
-
- if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) {
- memcpy(addr, pp->value, ETH_ALEN);
- return 0;
- }
- return -ENODEV;
-}
-
-static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr)
-{
- struct platform_device *pdev = of_find_device_by_node(np);
- struct nvmem_cell *cell;
- const void *mac;
- size_t len;
- int ret;
-
-	/* Try lookup by device first, there might be an nvmem_cell_lookup
- * associated with a given device.
- */
- if (pdev) {
- ret = nvmem_get_mac_address(&pdev->dev, addr);
- put_device(&pdev->dev);
- return ret;
- }
-
- cell = of_nvmem_cell_get(np, "mac-address");
- if (IS_ERR(cell))
- return PTR_ERR(cell);
-
- mac = nvmem_cell_read(cell, &len);
- nvmem_cell_put(cell);
-
- if (IS_ERR(mac))
- return PTR_ERR(mac);
-
- if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
- kfree(mac);
- return -EINVAL;
- }
-
- memcpy(addr, mac, ETH_ALEN);
- kfree(mac);
-
- return 0;
-}
-
-/**
- * of_get_mac_address()
- * @np: Caller's Device Node
- * @addr: Pointer to a six-byte array for the result
- *
- * Search the device tree for the best MAC address to use. 'mac-address' is
- * checked first, because that is supposed to contain the "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree. If any
- * of the above isn't set, then try to get MAC address from nvmem cell named
- * 'mac-address'.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the device tree, but were not set by U-Boot. For example, the
- * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
- * addresses. Some older U-Boots only initialized 'local-mac-address'. In
- * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
- * but is all zeros.
- *
- * Return: 0 on success and errno in case of error.
-*/
-int of_get_mac_address(struct device_node *np, u8 *addr)
-{
- int ret;
-
- if (!np)
- return -ENODEV;
-
- ret = of_get_mac_addr(np, "mac-address", addr);
- if (!ret)
- return 0;
-
- ret = of_get_mac_addr(np, "local-mac-address", addr);
- if (!ret)
- return 0;
-
- ret = of_get_mac_addr(np, "address", addr);
- if (!ret)
- return 0;
-
- return of_get_mac_addr_nvmem(np, addr);
-}
-EXPORT_SYMBOL(of_get_mac_address);
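
The two exported helpers deleted above are relocated rather than removed (their new home is outside this section), so their semantics are unchanged. A minimal caller sketch, assuming 'pdev' and 'ndev' from a typical probe path:

	u8 mac[ETH_ALEN];
	phy_interface_t mode;

	if (of_get_phy_mode(pdev->dev.of_node, &mode))
		mode = PHY_INTERFACE_MODE_RGMII;	/* assumed driver default */

	if (of_get_mac_address(pdev->dev.of_node, mac))
		eth_random_addr(mac);			/* common fallback */
	eth_hw_addr_set(ndev, mac);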
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index d2d0ed4b27c8..f650e19a315c 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
@@ -398,7 +399,6 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
return -EINVAL;
@@ -412,8 +412,7 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
return -EINVAL;
}
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+ eth_hw_addr_set(dev, &tuple->TupleData[2]);
return 0;
}
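
The conversion matters because net_device::dev_addr is being constified tree-wide; eth_hw_addr_set() is the sanctioned write path. The general shape of such a conversion, as a sketch (get_hw_mac() is a hypothetical placeholder):

	u8 addr[ETH_ALEN];

	get_hw_mac(addr);		/* hypothetical: read MAC from hardware */
	eth_hw_addr_set(dev, addr);	/* instead of writing dev->dev_addr[i] */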
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
deleted file mode 100644
index ac524cf0f31f..000000000000
--- a/drivers/ptp/idt8a340_reg.h
+++ /dev/null
@@ -1,720 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* idt8a340_reg.h
- *
- * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
- * https://github.com/richardcochran/regen
- *
- * Hand modified to include some HW registers.
- * Based on 4.8.0, SCSR rev C commit a03c7ae5
- */
-#ifndef HAVE_IDT8A340_REG
-#define HAVE_IDT8A340_REG
-
-#define PAGE_ADDR_BASE 0x0000
-#define PAGE_ADDR 0x00fc
-
-#define HW_REVISION 0x8180
-#define REV_ID 0x007a
-
-#define HW_DPLL_0 (0x8a00)
-#define HW_DPLL_1 (0x8b00)
-#define HW_DPLL_2 (0x8c00)
-#define HW_DPLL_3 (0x8d00)
-#define HW_DPLL_4 (0x8e00)
-#define HW_DPLL_5 (0x8f00)
-#define HW_DPLL_6 (0x9000)
-#define HW_DPLL_7 (0x9100)
-
-#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
-#define HW_DPLL_TOD_CTRL_1 (0x089)
-#define HW_DPLL_TOD_CTRL_2 (0x08A)
-#define HW_DPLL_TOD_OVR__0 (0x098)
-#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
-
-#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
-#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
-#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
-#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
-#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
-#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
-#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
-#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
-#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
-#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
-#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
-#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
-#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
-#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
-#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
-#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
-
-#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
-#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
-#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
-#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
-
-#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
-#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
-#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
-#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
-#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
-#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
-#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
-
-#define HW_Q8_CTRL_SPARE (0xa7d4)
-#define HW_Q11_CTRL_SPARE (0xa7ec)
-
-/**
- * Select FOD5 as sync_trigger for Q8 divider.
- * Transition from logic zero to one
- * sets trigger to sync Q8 divider.
- *
- * Unused when FOD4 is driving Q8 divider (normal operation).
- */
-#define Q9_TO_Q8_SYNC_TRIG BIT(1)
-
-/**
- * Enable FOD5 as driver for clock and sync for Q8 divider.
- * Enable fanout buffer for FOD5.
- *
- * Unused when FOD4 is driving Q8 divider (normal operation).
- */
-#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-
-/**
- * Select FOD6 as sync_trigger for Q11 divider.
- * Transition from logic zero to one
- * sets trigger to sync Q11 divider.
- *
- * Unused when FOD7 is driving Q11 divider (normal operation).
- */
-#define Q10_TO_Q11_SYNC_TRIG BIT(1)
-
-/**
- * Enable FOD6 as driver for clock and sync for Q11 divider.
- * Enable fanout buffer for FOD6.
- *
- * Unused when FOD7 is driving Q11 divider (normal operation).
- */
-#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-
-#define RESET_CTRL 0xc000
-#define SM_RESET 0x0012
-#define SM_RESET_CMD 0x5A
-
-#define GENERAL_STATUS 0xc014
-#define BOOT_STATUS 0x0000
-#define HW_REV_ID 0x000A
-#define BOND_ID 0x000B
-#define HW_CSR_ID 0x000C
-#define HW_IRQ_ID 0x000E
-
-#define MAJ_REL 0x0010
-#define MIN_REL 0x0011
-#define HOTFIX_REL 0x0012
-
-#define PIPELINE_ID 0x0014
-#define BUILD_ID 0x0018
-
-#define JTAG_DEVICE_ID 0x001c
-#define PRODUCT_ID 0x001e
-
-#define OTP_SCSR_CONFIG_SELECT 0x0022
-
-#define STATUS 0xc03c
-#define DPLL_SYS_STATUS 0x0020
-#define DPLL_SYS_APLL_STATUS 0x0021
-#define USER_GPIO0_TO_7_STATUS 0x008a
-#define USER_GPIO8_TO_15_STATUS 0x008b
-
-#define GPIO_USER_CONTROL 0xc160
-#define GPIO0_TO_7_OUT 0x0000
-#define GPIO8_TO_15_OUT 0x0001
-
-#define STICKY_STATUS_CLEAR 0xc164
-
-#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
-
-#define ALERT_CFG 0xc188
-
-#define SYS_DPLL_XO 0xc194
-
-#define SYS_APLL 0xc19c
-
-#define INPUT_0 0xc1b0
-
-#define INPUT_1 0xc1c0
-
-#define INPUT_2 0xc1d0
-
-#define INPUT_3 0xc200
-
-#define INPUT_4 0xc210
-
-#define INPUT_5 0xc220
-
-#define INPUT_6 0xc230
-
-#define INPUT_7 0xc240
-
-#define INPUT_8 0xc250
-
-#define INPUT_9 0xc260
-
-#define INPUT_10 0xc280
-
-#define INPUT_11 0xc290
-
-#define INPUT_12 0xc2a0
-
-#define INPUT_13 0xc2b0
-
-#define INPUT_14 0xc2c0
-
-#define INPUT_15 0xc2d0
-
-#define REF_MON_0 0xc2e0
-
-#define REF_MON_1 0xc2ec
-
-#define REF_MON_2 0xc300
-
-#define REF_MON_3 0xc30c
-
-#define REF_MON_4 0xc318
-
-#define REF_MON_5 0xc324
-
-#define REF_MON_6 0xc330
-
-#define REF_MON_7 0xc33c
-
-#define REF_MON_8 0xc348
-
-#define REF_MON_9 0xc354
-
-#define REF_MON_10 0xc360
-
-#define REF_MON_11 0xc36c
-
-#define REF_MON_12 0xc380
-
-#define REF_MON_13 0xc38c
-
-#define REF_MON_14 0xc398
-
-#define REF_MON_15 0xc3a4
-
-#define DPLL_0 0xc3b0
-#define DPLL_CTRL_REG_0 0x0002
-#define DPLL_CTRL_REG_1 0x0003
-#define DPLL_CTRL_REG_2 0x0004
-#define DPLL_TOD_SYNC_CFG 0x0031
-#define DPLL_COMBO_SLAVE_CFG_0 0x0032
-#define DPLL_COMBO_SLAVE_CFG_1 0x0033
-#define DPLL_SLAVE_REF_CFG 0x0034
-#define DPLL_REF_MODE 0x0035
-#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
-#define DPLL_MODE 0x0037
-
-#define DPLL_1 0xc400
-
-#define DPLL_2 0xc438
-
-#define DPLL_3 0xc480
-
-#define DPLL_4 0xc4b8
-
-#define DPLL_5 0xc500
-
-#define DPLL_6 0xc538
-
-#define DPLL_7 0xc580
-
-#define SYS_DPLL 0xc5b8
-
-#define DPLL_CTRL_0 0xc600
-#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
-#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
-
-#define DPLL_CTRL_1 0xc63c
-
-#define DPLL_CTRL_2 0xc680
-
-#define DPLL_CTRL_3 0xc6bc
-
-#define DPLL_CTRL_4 0xc700
-
-#define DPLL_CTRL_5 0xc73c
-
-#define DPLL_CTRL_6 0xc780
-
-#define DPLL_CTRL_7 0xc7bc
-
-#define SYS_DPLL_CTRL 0xc800
-
-#define DPLL_PHASE_0 0xc818
-
-/* Signed 42-bit FFO in units of 2^(-53) */
-#define DPLL_WR_PHASE 0x0000
-
-#define DPLL_PHASE_1 0xc81c
-
-#define DPLL_PHASE_2 0xc820
-
-#define DPLL_PHASE_3 0xc824
-
-#define DPLL_PHASE_4 0xc828
-
-#define DPLL_PHASE_5 0xc82c
-
-#define DPLL_PHASE_6 0xc830
-
-#define DPLL_PHASE_7 0xc834
-
-#define DPLL_FREQ_0 0xc838
-
-/* Signed 42-bit FFO in units of 2^(-53) */
-#define DPLL_WR_FREQ 0x0000
-
-#define DPLL_FREQ_1 0xc840
-
-#define DPLL_FREQ_2 0xc848
-
-#define DPLL_FREQ_3 0xc850
-
-#define DPLL_FREQ_4 0xc858
-
-#define DPLL_FREQ_5 0xc860
-
-#define DPLL_FREQ_6 0xc868
-
-#define DPLL_FREQ_7 0xc870
-
-#define DPLL_PHASE_PULL_IN_0 0xc880
-#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
-#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
-#define PULL_IN_CTRL 0x0007
-
-#define DPLL_PHASE_PULL_IN_1 0xc888
-
-#define DPLL_PHASE_PULL_IN_2 0xc890
-
-#define DPLL_PHASE_PULL_IN_3 0xc898
-
-#define DPLL_PHASE_PULL_IN_4 0xc8a0
-
-#define DPLL_PHASE_PULL_IN_5 0xc8a8
-
-#define DPLL_PHASE_PULL_IN_6 0xc8b0
-
-#define DPLL_PHASE_PULL_IN_7 0xc8b8
-
-#define GPIO_CFG 0xc8c0
-#define GPIO_CFG_GBL 0x0000
-
-#define GPIO_0 0xc8c2
-#define GPIO_DCO_INC_DEC 0x0000
-#define GPIO_OUT_CTRL_0 0x0001
-#define GPIO_OUT_CTRL_1 0x0002
-#define GPIO_TOD_TRIG 0x0003
-#define GPIO_DPLL_INDICATOR 0x0004
-#define GPIO_LOS_INDICATOR 0x0005
-#define GPIO_REF_INPUT_DSQ_0 0x0006
-#define GPIO_REF_INPUT_DSQ_1 0x0007
-#define GPIO_REF_INPUT_DSQ_2 0x0008
-#define GPIO_REF_INPUT_DSQ_3 0x0009
-#define GPIO_MAN_CLK_SEL_0 0x000a
-#define GPIO_MAN_CLK_SEL_1 0x000b
-#define GPIO_MAN_CLK_SEL_2 0x000c
-#define GPIO_SLAVE 0x000d
-#define GPIO_ALERT_OUT_CFG 0x000e
-#define GPIO_TOD_NOTIFICATION_CFG 0x000f
-#define GPIO_CTRL 0x0010
-
-#define GPIO_1 0xc8d4
-
-#define GPIO_2 0xc8e6
-
-#define GPIO_3 0xc900
-
-#define GPIO_4 0xc912
-
-#define GPIO_5 0xc924
-
-#define GPIO_6 0xc936
-
-#define GPIO_7 0xc948
-
-#define GPIO_8 0xc95a
-
-#define GPIO_9 0xc980
-
-#define GPIO_10 0xc992
-
-#define GPIO_11 0xc9a4
-
-#define GPIO_12 0xc9b6
-
-#define GPIO_13 0xc9c8
-
-#define GPIO_14 0xc9da
-
-#define GPIO_15 0xca00
-
-#define OUT_DIV_MUX 0xca12
-
-#define OUTPUT_0 0xca14
-/* FOD frequency output divider value */
-#define OUT_DIV 0x0000
-#define OUT_DUTY_CYCLE_HIGH 0x0004
-#define OUT_CTRL_0 0x0008
-#define OUT_CTRL_1 0x0009
-/* Phase adjustment in FOD cycles */
-#define OUT_PHASE_ADJ 0x000c
-
-#define OUTPUT_1 0xca24
-
-#define OUTPUT_2 0xca34
-
-#define OUTPUT_3 0xca44
-
-#define OUTPUT_4 0xca54
-
-#define OUTPUT_5 0xca64
-
-#define OUTPUT_6 0xca80
-
-#define OUTPUT_7 0xca90
-
-#define OUTPUT_8 0xcaa0
-
-#define OUTPUT_9 0xcab0
-
-#define OUTPUT_10 0xcac0
-
-#define OUTPUT_11 0xcad0
-
-#define SERIAL 0xcae0
-
-#define PWM_ENCODER_0 0xcb00
-
-#define PWM_ENCODER_1 0xcb08
-
-#define PWM_ENCODER_2 0xcb10
-
-#define PWM_ENCODER_3 0xcb18
-
-#define PWM_ENCODER_4 0xcb20
-
-#define PWM_ENCODER_5 0xcb28
-
-#define PWM_ENCODER_6 0xcb30
-
-#define PWM_ENCODER_7 0xcb38
-
-#define PWM_DECODER_0 0xcb40
-
-#define PWM_DECODER_1 0xcb48
-
-#define PWM_DECODER_2 0xcb50
-
-#define PWM_DECODER_3 0xcb58
-
-#define PWM_DECODER_4 0xcb60
-
-#define PWM_DECODER_5 0xcb68
-
-#define PWM_DECODER_6 0xcb70
-
-#define PWM_DECODER_7 0xcb80
-
-#define PWM_DECODER_8 0xcb88
-
-#define PWM_DECODER_9 0xcb90
-
-#define PWM_DECODER_10 0xcb98
-
-#define PWM_DECODER_11 0xcba0
-
-#define PWM_DECODER_12 0xcba8
-
-#define PWM_DECODER_13 0xcbb0
-
-#define PWM_DECODER_14 0xcbb8
-
-#define PWM_DECODER_15 0xcbc0
-
-#define PWM_USER_DATA 0xcbc8
-
-#define TOD_0 0xcbcc
-
-/* Enable TOD counter, output channel sync and even-PPS mode */
-#define TOD_CFG 0x0000
-
-#define TOD_1 0xcbce
-
-#define TOD_2 0xcbd0
-
-#define TOD_3 0xcbd2
-
-
-#define TOD_WRITE_0 0xcc00
-/* 8-bit subns, 32-bit ns, 48-bit seconds */
-#define TOD_WRITE 0x0000
-/* Counter increments after TOD write is completed */
-#define TOD_WRITE_COUNTER 0x000c
-/* TOD write trigger configuration */
-#define TOD_WRITE_SELECT_CFG_0 0x000d
-/* TOD write trigger selection */
-#define TOD_WRITE_CMD 0x000f
-
-#define TOD_WRITE_1 0xcc10
-
-#define TOD_WRITE_2 0xcc20
-
-#define TOD_WRITE_3 0xcc30
-
-#define TOD_READ_PRIMARY_0 0xcc40
-/* 8-bit subns, 32-bit ns, 48-bit seconds */
-#define TOD_READ_PRIMARY 0x0000
-/* Counter increments after TOD write is completed */
-#define TOD_READ_PRIMARY_COUNTER 0x000b
-/* Read trigger configuration */
-#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
-/* Read trigger selection */
-#define TOD_READ_PRIMARY_CMD 0x000e
-
-#define TOD_READ_PRIMARY_1 0xcc50
-
-#define TOD_READ_PRIMARY_2 0xcc60
-
-#define TOD_READ_PRIMARY_3 0xcc80
-
-#define TOD_READ_SECONDARY_0 0xcc90
-
-#define TOD_READ_SECONDARY_1 0xcca0
-
-#define TOD_READ_SECONDARY_2 0xccb0
-
-#define TOD_READ_SECONDARY_3 0xccc0
-
-#define OUTPUT_TDC_CFG 0xccd0
-
-#define OUTPUT_TDC_0 0xcd00
-
-#define OUTPUT_TDC_1 0xcd08
-
-#define OUTPUT_TDC_2 0xcd10
-
-#define OUTPUT_TDC_3 0xcd18
-
-#define INPUT_TDC 0xcd20
-
-#define SCRATCH 0xcf50
-
-#define EEPROM 0xcf68
-
-#define OTP 0xcf70
-
-#define BYTE 0xcf80
-
-/* Bit definitions for the MAJ_REL register */
-#define MAJOR_SHIFT (1)
-#define MAJOR_MASK (0x7f)
-#define PR_BUILD BIT(0)
-
-/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
-#define GPIO0_LEVEL BIT(0)
-#define GPIO1_LEVEL BIT(1)
-#define GPIO2_LEVEL BIT(2)
-#define GPIO3_LEVEL BIT(3)
-#define GPIO4_LEVEL BIT(4)
-#define GPIO5_LEVEL BIT(5)
-#define GPIO6_LEVEL BIT(6)
-#define GPIO7_LEVEL BIT(7)
-
-/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
-#define GPIO8_LEVEL BIT(0)
-#define GPIO9_LEVEL BIT(1)
-#define GPIO10_LEVEL BIT(2)
-#define GPIO11_LEVEL BIT(3)
-#define GPIO12_LEVEL BIT(4)
-#define GPIO13_LEVEL BIT(5)
-#define GPIO14_LEVEL BIT(6)
-#define GPIO15_LEVEL BIT(7)
-
-/* Bit definitions for the GPIO0_TO_7_OUT register */
-#define GPIO0_DRIVE_LEVEL BIT(0)
-#define GPIO1_DRIVE_LEVEL BIT(1)
-#define GPIO2_DRIVE_LEVEL BIT(2)
-#define GPIO3_DRIVE_LEVEL BIT(3)
-#define GPIO4_DRIVE_LEVEL BIT(4)
-#define GPIO5_DRIVE_LEVEL BIT(5)
-#define GPIO6_DRIVE_LEVEL BIT(6)
-#define GPIO7_DRIVE_LEVEL BIT(7)
-
-/* Bit definitions for the GPIO8_TO_15_OUT register */
-#define GPIO8_DRIVE_LEVEL BIT(0)
-#define GPIO9_DRIVE_LEVEL BIT(1)
-#define GPIO10_DRIVE_LEVEL BIT(2)
-#define GPIO11_DRIVE_LEVEL BIT(3)
-#define GPIO12_DRIVE_LEVEL BIT(4)
-#define GPIO13_DRIVE_LEVEL BIT(5)
-#define GPIO14_DRIVE_LEVEL BIT(6)
-#define GPIO15_DRIVE_LEVEL BIT(7)
-
-/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
-#define TOD_SYNC_SOURCE_SHIFT (1)
-#define TOD_SYNC_SOURCE_MASK (0x3)
-#define TOD_SYNC_EN BIT(0)
-
-/* Bit definitions for the DPLL_MODE register */
-#define WRITE_TIMER_MODE BIT(6)
-#define PLL_MODE_SHIFT (3)
-#define PLL_MODE_MASK (0x7)
-#define STATE_MODE_SHIFT (0)
-#define STATE_MODE_MASK (0x7)
-
-/* Bit definitions for the GPIO_CFG_GBL register */
-#define SUPPLY_MODE_SHIFT (0)
-#define SUPPLY_MODE_MASK (0x3)
-
-/* Bit definitions for the GPIO_DCO_INC_DEC register */
-#define INCDEC_DPLL_INDEX_SHIFT (0)
-#define INCDEC_DPLL_INDEX_MASK (0x7)
-
-/* Bit definitions for the GPIO_OUT_CTRL_0 register */
-#define CTRL_OUT_0 BIT(0)
-#define CTRL_OUT_1 BIT(1)
-#define CTRL_OUT_2 BIT(2)
-#define CTRL_OUT_3 BIT(3)
-#define CTRL_OUT_4 BIT(4)
-#define CTRL_OUT_5 BIT(5)
-#define CTRL_OUT_6 BIT(6)
-#define CTRL_OUT_7 BIT(7)
-
-/* Bit definitions for the GPIO_OUT_CTRL_1 register */
-#define CTRL_OUT_8 BIT(0)
-#define CTRL_OUT_9 BIT(1)
-#define CTRL_OUT_10 BIT(2)
-#define CTRL_OUT_11 BIT(3)
-#define CTRL_OUT_12 BIT(4)
-#define CTRL_OUT_13 BIT(5)
-#define CTRL_OUT_14 BIT(6)
-#define CTRL_OUT_15 BIT(7)
-
-/* Bit definitions for the GPIO_TOD_TRIG register */
-#define TOD_TRIG_0 BIT(0)
-#define TOD_TRIG_1 BIT(1)
-#define TOD_TRIG_2 BIT(2)
-#define TOD_TRIG_3 BIT(3)
-
-/* Bit definitions for the GPIO_DPLL_INDICATOR register */
-#define IND_DPLL_INDEX_SHIFT (0)
-#define IND_DPLL_INDEX_MASK (0x7)
-
-/* Bit definitions for the GPIO_LOS_INDICATOR register */
-#define REFMON_INDEX_SHIFT (0)
-#define REFMON_INDEX_MASK (0xf)
-/* Active level of LOS indicator, 0=low 1=high */
-#define ACTIVE_LEVEL BIT(4)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
-#define DSQ_INP_0 BIT(0)
-#define DSQ_INP_1 BIT(1)
-#define DSQ_INP_2 BIT(2)
-#define DSQ_INP_3 BIT(3)
-#define DSQ_INP_4 BIT(4)
-#define DSQ_INP_5 BIT(5)
-#define DSQ_INP_6 BIT(6)
-#define DSQ_INP_7 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
-#define DSQ_INP_8 BIT(0)
-#define DSQ_INP_9 BIT(1)
-#define DSQ_INP_10 BIT(2)
-#define DSQ_INP_11 BIT(3)
-#define DSQ_INP_12 BIT(4)
-#define DSQ_INP_13 BIT(5)
-#define DSQ_INP_14 BIT(6)
-#define DSQ_INP_15 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
-#define DSQ_DPLL_0 BIT(0)
-#define DSQ_DPLL_1 BIT(1)
-#define DSQ_DPLL_2 BIT(2)
-#define DSQ_DPLL_3 BIT(3)
-#define DSQ_DPLL_4 BIT(4)
-#define DSQ_DPLL_5 BIT(5)
-#define DSQ_DPLL_6 BIT(6)
-#define DSQ_DPLL_7 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
-#define DSQ_DPLL_SYS BIT(0)
-#define GPIO_DSQ_LEVEL BIT(1)
-
-/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
-#define DPLL_TOD_SHIFT (0)
-#define DPLL_TOD_MASK (0x3)
-#define TOD_READ_SECONDARY BIT(2)
-#define GPIO_ASSERT_LEVEL BIT(3)
-
-/* Bit definitions for the GPIO_CTRL register */
-#define GPIO_FUNCTION_EN BIT(0)
-#define GPIO_CMOS_OD_MODE BIT(1)
-#define GPIO_CONTROL_DIR BIT(2)
-#define GPIO_PU_PD_MODE BIT(3)
-#define GPIO_FUNCTION_SHIFT (4)
-#define GPIO_FUNCTION_MASK (0xf)
-
-/* Bit definitions for the OUT_CTRL_1 register */
-#define OUT_SYNC_DISABLE BIT(7)
-#define SQUELCH_VALUE BIT(6)
-#define SQUELCH_DISABLE BIT(5)
-#define PAD_VDDO_SHIFT (2)
-#define PAD_VDDO_MASK (0x7)
-#define PAD_CMOSDRV_SHIFT (0)
-#define PAD_CMOSDRV_MASK (0x3)
-
-/* Bit definitions for the TOD_CFG register */
-#define TOD_EVEN_PPS_MODE BIT(2)
-#define TOD_OUT_SYNC_ENABLE BIT(1)
-#define TOD_ENABLE BIT(0)
-
-/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
-#define WR_PWM_DECODER_INDEX_SHIFT (4)
-#define WR_PWM_DECODER_INDEX_MASK (0xf)
-#define WR_REF_INDEX_SHIFT (0)
-#define WR_REF_INDEX_MASK (0xf)
-
-/* Bit definitions for the TOD_WRITE_CMD register */
-#define TOD_WRITE_SELECTION_SHIFT (0)
-#define TOD_WRITE_SELECTION_MASK (0xf)
-/* 4.8.7 */
-#define TOD_WRITE_TYPE_SHIFT (4)
-#define TOD_WRITE_TYPE_MASK (0x3)
-
-/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
-#define RD_PWM_DECODER_INDEX_SHIFT (4)
-#define RD_PWM_DECODER_INDEX_MASK (0xf)
-#define RD_REF_INDEX_SHIFT (0)
-#define RD_REF_INDEX_MASK (0xf)
-
-/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
-#define TOD_READ_TRIGGER_MODE BIT(4)
-#define TOD_READ_TRIGGER_SHIFT (0)
-#define TOD_READ_TRIGGER_MASK (0xf)
-
-/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
-#define COMBO_MASTER_HOLD BIT(0)
-
-/* Bit definitions for DPLL_SYS_STATUS register */
-#define DPLL_SYS_STATE_MASK (0xf)
-
-/* Bit definitions for SYS_APLL_STATUS register */
-#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
-#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
-#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
-
-#endif
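
This register map is deleted here because, per the ptp_clockmatrix.c hunk below, it now ships as <linux/mfd/idt8a340_reg.h> and is shared with the MFD side. Its SHIFT/MASK pairs are used with the usual read-modify-write idiom, e.g. for DPLL_MODE (mirroring idtcm_set_pll_mode() later in this diff; new_mode is a placeholder):

	mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;

	dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
	dpll_mode |= (new_mode << PLL_MODE_SHIFT);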
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index fa636951169e..6bc5791a7ec5 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -6,7 +6,7 @@
* Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
*/
#include <linux/firmware.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
@@ -14,6 +14,10 @@
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idt8a340_reg.h>
+#include <asm/unaligned.h>
#include "ptp_private.h"
#include "ptp_clockmatrix.h"
@@ -32,16 +36,43 @@ static char *firmware;
module_param(firmware, charp, 0);
#define SETTIME_CORRECTION (0)
+#define EXTTS_PERIOD_MS (95)
-static int contains_full_configuration(const struct firmware *fw)
+static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm);
+
+static inline int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return regmap_bulk_read(idtcm->regmap, module + regaddr, buf, count);
+}
+
+static inline int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return regmap_bulk_write(idtcm->regmap, module + regaddr, buf, count);
+}
+
+static int contains_full_configuration(struct idtcm *idtcm,
+ const struct firmware *fw)
{
- s32 full_count = FULL_FW_CFG_BYTES - FULL_FW_CFG_SKIPPED_BYTES;
struct idtcm_fwrc *rec = (struct idtcm_fwrc *)fw->data;
+ u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
+ s32 full_count;
s32 count = 0;
u16 regaddr;
u8 loaddr;
s32 len;
+ /* 4 bytes skipped every 0x80 */
+ full_count = (scratch - GPIO_USER_CONTROL) -
+ ((scratch >> 7) - (GPIO_USER_CONTROL >> 7)) * 4;
+
/* If the firmware contains 'full configuration' SM_RESET can be used
* to ensure proper configuration.
*
@@ -57,7 +88,7 @@ static int contains_full_configuration(const struct firmware *fw)
rec++;
/* Top (status registers) and bottom are read-only */
- if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+ if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
continue;
/* Page size 128, last 4 bytes of page skipped */
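
Worked example of the runtime full_count computation, using the addresses from the just-deleted header (GPIO_USER_CONTROL = 0xc160, SCRATCH = 0xcf50; the versioned V520 map can place SCRATCH elsewhere, which is why a fixed FULL_FW_CFG_BYTES constant no longer suffices):

	/*
	 * raw span      = 0xcf50 - 0xc160               = 3568 bytes
	 * pages crossed = (0xcf50 >> 7) - (0xc160 >> 7) = 414 - 386 = 28
	 * full_count    = 3568 - 28 * 4                 = 3456 bytes
	 *
	 * i.e. the 4 skipped bytes at the end of each 128-byte page are
	 * subtracted from the writable configuration span.
	 */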
@@ -152,132 +183,17 @@ static int idtcm_strverscmp(const char *version1, const char *version2)
return 0;
}
-static int idtcm_xfer_read(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count)
-{
- struct i2c_client *client = idtcm->client;
- struct i2c_msg msg[2];
- int cnt;
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &regaddr;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = count;
- msg[1].buf = buf;
-
- cnt = i2c_transfer(client->adapter, msg, 2);
-
- if (cnt < 0) {
- dev_err(&client->dev,
- "i2c_transfer failed at %d in %s, at addr: %04x!",
- __LINE__, __func__, regaddr);
- return cnt;
- } else if (cnt != 2) {
- dev_err(&client->dev,
- "i2c_transfer sent only %d of %d messages", cnt, 2);
- return -EIO;
- }
-
- return 0;
-}
-
-static int idtcm_xfer_write(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count)
-{
- struct i2c_client *client = idtcm->client;
- /* we add 1 byte for device register */
- u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
- int cnt;
-
- if (count > IDTCM_MAX_WRITE_COUNT)
- return -EINVAL;
-
- msg[0] = regaddr;
- memcpy(&msg[1], buf, count);
-
- cnt = i2c_master_send(client, msg, count + 1);
-
- if (cnt < 0) {
- dev_err(&client->dev,
- "i2c_master_send failed at %d in %s, at addr: %04x!",
- __LINE__, __func__, regaddr);
- return cnt;
- }
-
- return 0;
-}
-
-static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
-{
- u8 buf[4];
- int err;
-
- if (idtcm->page_offset == val)
- return 0;
-
- buf[0] = 0x0;
- buf[1] = val;
- buf[2] = 0x10;
- buf[3] = 0x20;
-
- err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
- if (err) {
- idtcm->page_offset = 0xff;
- dev_err(&idtcm->client->dev, "failed to set page offset");
- } else {
- idtcm->page_offset = val;
- }
-
- return err;
-}
-
-static int _idtcm_rdwr(struct idtcm *idtcm,
- u16 regaddr,
- u8 *buf,
- u16 count,
- bool write)
+static enum fw_version idtcm_fw_version(const char *version)
{
- u8 hi;
- u8 lo;
- int err;
+ enum fw_version ver = V_DEFAULT;
- hi = (regaddr >> 8) & 0xff;
- lo = regaddr & 0xff;
-
- err = idtcm_page_offset(idtcm, hi);
- if (err)
- return err;
-
- if (write)
- return idtcm_xfer_write(idtcm, lo, buf, count);
-
- return idtcm_xfer_read(idtcm, lo, buf, count);
-}
+ if (idtcm_strverscmp(version, "4.8.7") >= 0)
+ ver = V487;
-static int idtcm_read(struct idtcm *idtcm,
- u16 module,
- u16 regaddr,
- u8 *buf,
- u16 count)
-{
- return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
-}
+ if (idtcm_strverscmp(version, "5.2.0") >= 0)
+ ver = V520;
-static int idtcm_write(struct idtcm *idtcm,
- u16 module,
- u16 regaddr,
- u8 *buf,
- u16 count)
-{
- return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
+ return ver;
}
static int clear_boot_status(struct idtcm *idtcm)
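
All of the removed transfer plumbing above collapses into the two regmap_bulk_*() wrappers because the register map is now owned by the rsmu MFD parent device. A paged map of this shape can be described to regmap roughly as follows (a sketch with assumed values, not the actual rsmu code):

	#include <linux/regmap.h>

	static const struct regmap_range_cfg idtcm_range_cfg = {
		.range_min	= 0,
		.range_max	= 0xffff,	/* assumed: whole SCSR space */
		.selector_reg	= 0x00fc,	/* PAGE_ADDR in the old header */
		.selector_mask	= 0xff,
		.window_start	= 0,
		.window_len	= 0x100,	/* high byte selects the page */
	};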
@@ -318,11 +234,82 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm)
} while (i);
- dev_warn(&idtcm->client->dev, "%s timed out", __func__);
+ dev_warn(idtcm->dev, "%s timed out", __func__);
return -EBUSY;
}
+static int _idtcm_set_scsr_read_trig(struct idtcm_channel *channel,
+ enum scsr_read_trig_sel trig, u8 ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
+ u8 val;
+ int err;
+
+ if (trig == SCSR_TOD_READ_TRIG_SEL_REFCLK) {
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT);
+ val |= (ref << WR_REF_INDEX_SHIFT);
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
+ if (err)
+ return err;
+ }
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ tod_read_cmd, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ val |= (trig << TOD_READ_TRIGGER_SHIFT);
+ val &= ~TOD_READ_TRIGGER_MODE; /* single shot */
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ tod_read_cmd, &val, sizeof(val));
+ return err;
+}
+
+static int idtcm_enable_extts(struct idtcm_channel *channel, u8 todn, u8 ref,
+ bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 old_mask = idtcm->extts_mask;
+ u8 mask = 1 << todn;
+ int err = 0;
+
+ if (todn >= MAX_TOD)
+ return -EINVAL;
+
+ if (enable) {
+ if (ref > 0xF) /* E_REF_CLK15 */
+ return -EINVAL;
+ if (idtcm->extts_mask & mask)
+ return 0;
+ err = _idtcm_set_scsr_read_trig(&idtcm->channel[todn],
+ SCSR_TOD_READ_TRIG_SEL_REFCLK,
+ ref);
+ if (err == 0) {
+ idtcm->extts_mask |= mask;
+ idtcm->event_channel[todn] = channel;
+ idtcm->channel[todn].refn = ref;
+ }
+ } else
+ idtcm->extts_mask &= ~mask;
+
+ if (old_mask == 0 && idtcm->extts_mask)
+ schedule_delayed_work(&idtcm->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+
+ return err;
+}
+
static int read_sys_apll_status(struct idtcm *idtcm, u8 *status)
{
return idtcm_read(idtcm, STATUS, DPLL_SYS_APLL_STATUS, status,
@@ -359,7 +346,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
} else if (dpll == DPLL_STATE_FREERUN ||
dpll == DPLL_STATE_HOLDOVER ||
dpll == DPLL_STATE_OPEN_LOOP) {
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"No wait state: DPLL_SYS_STATE %d", dpll);
return -EPERM;
}
@@ -367,7 +354,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
msleep(LOCK_POLL_INTERVAL_MS);
} while (time_is_after_jiffies(timeout));
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"%d ms lock timeout: SYS APLL Loss Lock %d SYS DPLL state %d",
LOCK_TIMEOUT_MS, apll, dpll);
@@ -377,50 +364,36 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
static void wait_for_chip_ready(struct idtcm *idtcm)
{
if (wait_for_boot_status_ready(idtcm))
- dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0");
+ dev_warn(idtcm->dev, "BOOT_STATUS != 0xA0");
if (wait_for_sys_apll_dpll_lock(idtcm))
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"Continuing while SYS APLL/DPLL is not locked");
}
static int _idtcm_gettime(struct idtcm_channel *channel,
- struct timespec64 *ts)
+ struct timespec64 *ts, u8 timeout)
{
struct idtcm *idtcm = channel->idtcm;
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
u8 buf[TOD_BYTE_COUNT];
- u8 timeout = 10;
u8 trigger;
int err;
- err = idtcm_read(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
- if (err)
- return err;
-
- trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
- trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
- trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */
-
- err = idtcm_write(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
- if (err)
- return err;
-
	/* wait for trigger to be 0 */
- while (trigger & TOD_READ_TRIGGER_MASK) {
+ do {
+ if (timeout-- == 0)
+ return -EIO;
+
if (idtcm->calculate_overhead_flag)
idtcm->start_time = ktime_get_raw();
err = idtcm_read(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_CMD, &trigger,
+ tod_read_cmd, &trigger,
sizeof(trigger));
if (err)
return err;
-
- if (--timeout == 0)
- return -EIO;
- }
+ } while (trigger & TOD_READ_TRIGGER_MASK);
err = idtcm_read(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY, buf, sizeof(buf));
@@ -432,6 +405,79 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
return err;
}
+static int idtcm_extts_check_channel(struct idtcm *idtcm, u8 todn)
+{
+ struct idtcm_channel *ptp_channel, *extts_channel;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ u32 dco_delay = 0;
+ int err;
+
+ extts_channel = &idtcm->channel[todn];
+ ptp_channel = idtcm->event_channel[todn];
+ if (extts_channel == ptp_channel)
+ dco_delay = ptp_channel->dco_delay;
+
+ err = _idtcm_gettime(extts_channel, &ts, 1);
+ if (err == 0) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = todn;
+ event.timestamp = timespec64_to_ns(&ts) - dco_delay;
+ ptp_clock_event(ptp_channel->ptp_clock, &event);
+ }
+ return err;
+}
+
+static u8 idtcm_enable_extts_mask(struct idtcm_channel *channel,
+ u8 extts_mask, bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int i, err;
+
+ for (i = 0; i < MAX_TOD; i++) {
+ u8 mask = 1 << i;
+ u8 refn = idtcm->channel[i].refn;
+
+ if (extts_mask & mask) {
+ /* check extts before disabling it */
+ if (enable == false) {
+ err = idtcm_extts_check_channel(idtcm, i);
+ /* trigger happened so we won't re-enable it */
+ if (err == 0)
+ extts_mask &= ~mask;
+ }
+ (void)idtcm_enable_extts(channel, i, refn, enable);
+ }
+ }
+
+ return extts_mask;
+}
+
+static int _idtcm_gettime_immediate(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 extts_mask = 0;
+ int err;
+
+ /* Disable extts */
+ if (idtcm->extts_mask) {
+ extts_mask = idtcm_enable_extts_mask(channel, idtcm->extts_mask,
+ false);
+ }
+
+ err = _idtcm_set_scsr_read_trig(channel,
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE, 0);
+ if (err == 0)
+ err = _idtcm_gettime(channel, ts, 10);
+
+ /* Re-enable extts */
+ if (extts_mask)
+ idtcm_enable_extts_mask(channel, extts_mask, true);
+
+ return err;
+}
+
static int _sync_pll_output(struct idtcm *idtcm,
u8 pll,
u8 sync_src,
@@ -559,35 +605,10 @@ static int _sync_pll_output(struct idtcm *idtcm,
return err;
}
-static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src)
-{
- int err = 0;
-
- switch (tod_addr) {
- case TOD_0:
- *sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
- break;
- case TOD_1:
- *sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
- break;
- case TOD_2:
- *sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
- break;
- case TOD_3:
- *sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
static int idtcm_sync_pps_output(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
u8 pll;
- u8 sync_src;
u8 qn;
u8 qn_plus_1;
int err = 0;
@@ -596,10 +617,6 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
u8 temp;
u16 output_mask = channel->output_mask;
- err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src);
- if (err)
- return err;
-
err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
@@ -655,8 +672,8 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
}
if (qn != 0 || qn_plus_1 != 0)
- err = _sync_pll_output(idtcm, pll, sync_src, qn,
- qn_plus_1);
+ err = _sync_pll_output(idtcm, pll, channel->sync_src,
+ qn, qn_plus_1);
if (err)
return err;
@@ -666,8 +683,8 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
}
static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
- struct timespec64 const *ts,
- enum hw_tod_write_trig_sel wr_trig)
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
{
struct idtcm *idtcm = channel->idtcm;
u8 buf[TOD_BYTE_COUNT];
@@ -784,7 +801,7 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
break;
if (++count > 20) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Timed out waiting for the write counter");
return -EIO;
}
@@ -793,46 +810,46 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
return 0;
}
-static int get_output_base_addr(u8 outn)
+static int get_output_base_addr(enum fw_version ver, u8 outn)
{
int base;
switch (outn) {
case 0:
- base = OUTPUT_0;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_0);
break;
case 1:
- base = OUTPUT_1;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_1);
break;
case 2:
- base = OUTPUT_2;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_2);
break;
case 3:
- base = OUTPUT_3;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_3);
break;
case 4:
- base = OUTPUT_4;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_4);
break;
case 5:
- base = OUTPUT_5;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_5);
break;
case 6:
- base = OUTPUT_6;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_6);
break;
case 7:
- base = OUTPUT_7;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_7);
break;
case 8:
- base = OUTPUT_8;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_8);
break;
case 9:
- base = OUTPUT_9;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_9);
break;
case 10:
- base = OUTPUT_10;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_10);
break;
case 11:
- base = OUTPUT_11;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_11);
break;
default:
base = -EINVAL;
@@ -849,7 +866,7 @@ static int _idtcm_settime_deprecated(struct idtcm_channel *channel,
err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"%s: Set HW ToD failed", __func__);
return err;
}
@@ -929,9 +946,9 @@ static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
return err;
}
-static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
- s32 offset_ns,
- u32 max_ffo_ppb)
+static int do_phase_pull_in_fw(struct idtcm_channel *channel,
+ s32 offset_ns,
+ u32 max_ffo_ppb)
{
int err;
@@ -1000,7 +1017,7 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
s64 now;
if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED) {
- err = idtcm_do_phase_pull_in(channel, delta, 0);
+ err = channel->do_phase_pull_in(channel, delta, 0);
} else {
idtcm->calculate_overhead_flag = 1;
@@ -1008,7 +1025,7 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
if (err)
return err;
- err = _idtcm_gettime(channel, &ts);
+ err = _idtcm_gettime_immediate(channel, &ts);
if (err)
return err;
@@ -1032,7 +1049,9 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
clear_boot_status(idtcm);
- err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+ err = idtcm_write(idtcm, RESET_CTRL,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, SM_RESET),
+ &byte, sizeof(byte));
if (!err) {
for (i = 0; i < 30; i++) {
@@ -1040,14 +1059,14 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
read_boot_status(idtcm, &status);
if (status == 0xA0) {
- dev_dbg(&idtcm->client->dev,
+ dev_dbg(idtcm->dev,
"SM_RESET completed in %d ms", i * 100);
break;
}
}
if (!status)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Timed out waiting for CM_RESET to complete");
}
@@ -1144,12 +1163,12 @@ static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll)
{
if (index >= MAX_TOD) {
- dev_err(&idtcm->client->dev, "ToD%d not supported", index);
+ dev_err(idtcm->dev, "ToD%d not supported", index);
return -EINVAL;
}
if (pll >= MAX_PLL) {
- dev_err(&idtcm->client->dev, "Pll%d not supported", pll);
+ dev_err(idtcm->dev, "Pll%d not supported", pll);
return -EINVAL;
}
@@ -1167,7 +1186,7 @@ static int check_and_set_masks(struct idtcm *idtcm,
switch (regaddr) {
case TOD_MASK_ADDR:
if ((val & 0xf0) || !(val & 0x0f)) {
- dev_err(&idtcm->client->dev, "Invalid TOD mask 0x%02x", val);
+ dev_err(idtcm->dev, "Invalid TOD mask 0x%02x", val);
err = -EINVAL;
} else {
idtcm->tod_mask = val;
@@ -1198,13 +1217,13 @@ static void display_pll_and_masks(struct idtcm *idtcm)
u8 i;
u8 mask;
- dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
+ dev_dbg(idtcm->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
for (i = 0; i < MAX_TOD; i++) {
mask = 1 << i;
if (mask & idtcm->tod_mask)
- dev_dbg(&idtcm->client->dev,
+ dev_dbg(idtcm->dev,
"TOD%d pll = %d output_mask = 0x%04x",
i, idtcm->channel[i].pll,
idtcm->channel[i].output_mask);
@@ -1214,6 +1233,7 @@ static void display_pll_and_masks(struct idtcm *idtcm)
static int idtcm_load_firmware(struct idtcm *idtcm,
struct device *dev)
{
+ u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
char fname[128] = FW_FILENAME;
const struct firmware *fw;
struct idtcm_fwrc *rec;
@@ -1226,25 +1246,25 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
if (firmware) /* module parameter */
snprintf(fname, sizeof(fname), "%s", firmware);
- dev_dbg(&idtcm->client->dev, "requesting firmware '%s'", fname);
+ dev_info(idtcm->dev, "requesting firmware '%s'", fname);
err = request_firmware(&fw, fname, dev);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
- dev_dbg(&idtcm->client->dev, "firmware size %zu bytes", fw->size);
+ dev_dbg(idtcm->dev, "firmware size %zu bytes", fw->size);
rec = (struct idtcm_fwrc *) fw->data;
- if (contains_full_configuration(fw))
+ if (contains_full_configuration(idtcm, fw))
idtcm_state_machine_reset(idtcm);
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
if (rec->reserved) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"bad firmware, reserved field non-zero");
err = -EINVAL;
} else {
@@ -1263,7 +1283,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
err = 0;
/* Top (status registers) and bottom are read-only */
- if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+ if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
continue;
/* Page size 128, last 4 bytes of page skipped */
@@ -1292,10 +1312,10 @@ static int idtcm_output_enable(struct idtcm_channel *channel,
int err;
u8 val;
- base = get_output_base_addr(outn);
+ base = get_output_base_addr(idtcm->fw_ver, outn);
if (!(base > 0)) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"%s - Unsupported out%d", __func__, outn);
return base;
}
@@ -1337,8 +1357,8 @@ static int idtcm_output_mask_enable(struct idtcm_channel *channel,
}
static int idtcm_perout_enable(struct idtcm_channel *channel,
- bool enable,
- struct ptp_perout_request *perout)
+ struct ptp_perout_request *perout,
+ bool enable)
{
struct idtcm *idtcm = channel->idtcm;
unsigned int flags = perout->flags;
@@ -1351,7 +1371,7 @@ static int idtcm_perout_enable(struct idtcm_channel *channel,
err = idtcm_output_enable(channel, enable, perout->index);
if (err) {
- dev_err(&idtcm->client->dev, "Unable to set output enable");
+ dev_err(idtcm->dev, "Unable to set output enable");
return err;
}
@@ -1360,53 +1380,331 @@ static int idtcm_perout_enable(struct idtcm_channel *channel,
}
static int idtcm_get_pll_mode(struct idtcm_channel *channel,
- enum pll_mode *pll_mode)
+ enum pll_mode *mode)
{
struct idtcm *idtcm = channel->idtcm;
int err;
u8 dpll_mode;
- err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ err = idtcm_read(idtcm, channel->dpll_n,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
- *pll_mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+ *mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
return 0;
}
static int idtcm_set_pll_mode(struct idtcm_channel *channel,
- enum pll_mode pll_mode)
+ enum pll_mode mode)
{
struct idtcm *idtcm = channel->idtcm;
int err;
u8 dpll_mode;
- err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ err = idtcm_read(idtcm, channel->dpll_n,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
- dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
-
- channel->pll_mode = pll_mode;
+ dpll_mode |= (mode << PLL_MODE_SHIFT);
- err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+ err = idtcm_write(idtcm, channel->dpll_n,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
+ return err;
+}
+
+static int idtcm_get_manual_reference(struct idtcm_channel *channel,
+ enum manual_reference *ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 dpll_manu_ref_cfg;
+ int err;
+
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_MANU_REF_CFG,
+ &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
if (err)
return err;
+ dpll_manu_ref_cfg &= (MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
+
+ *ref = dpll_manu_ref_cfg >> MANUAL_REFERENCE_SHIFT;
+
return 0;
}
+static int idtcm_set_manual_reference(struct idtcm_channel *channel,
+ enum manual_reference ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 dpll_manu_ref_cfg;
+ int err;
+
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_MANU_REF_CFG,
+ &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
+ if (err)
+ return err;
+
+ dpll_manu_ref_cfg &= ~(MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
+
+ dpll_manu_ref_cfg |= (ref << MANUAL_REFERENCE_SHIFT);
+
+ err = idtcm_write(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_MANU_REF_CFG,
+ &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
+
+ return err;
+}
+
+static int configure_dpll_mode_write_frequency(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set pll mode to write frequency");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+
+ return err;
+}
+
+static int configure_dpll_mode_write_phase(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set pll mode to write phase");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+
+ return err;
+}
+
+static int configure_manual_reference_write_frequency(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_FREQUENCY);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set manual reference to write frequency");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+
+ return err;
+}
+
+static int configure_manual_reference_write_phase(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_PHASE);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set manual reference to write phase");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+
+ return err;
+}
+
+static int idtcm_stop_phase_pull_in(struct idtcm_channel *channel)
+{
+ int err;
+
+ err = _idtcm_adjfine(channel, channel->current_freq_scaled_ppm);
+ if (err)
+ return err;
+
+ channel->phase_pull_in = false;
+
+ return 0;
+}
+
+static long idtcm_work_handler(struct ptp_clock_info *ptp)
+{
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+
+ mutex_lock(idtcm->lock);
+
+ (void)idtcm_stop_phase_pull_in(channel);
+
+ mutex_unlock(idtcm->lock);
+
+ /* Return a negative value here to not reschedule */
+ return -1;
+}
+
+static s32 phase_pull_in_scaled_ppm(s32 current_ppm, s32 phase_pull_in_ppb)
+{
+ /* ppb = scaled_ppm * 125 / 2^13 */
+ /* scaled_ppm = ppb * 2^13 / 125 */
+
+ s64 max_scaled_ppm = div_s64((s64)PHASE_PULL_IN_MAX_PPB << 13, 125);
+ s64 scaled_ppm = div_s64((s64)phase_pull_in_ppb << 13, 125);
+
+ current_ppm += scaled_ppm;
+
+ if (current_ppm > max_scaled_ppm)
+ current_ppm = max_scaled_ppm;
+ else if (current_ppm < -max_scaled_ppm)
+ current_ppm = -max_scaled_ppm;
+
+ return current_ppm;
+}
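+
The conversion factor in the comments follows from the encoding: scaled_ppm is ppm in 16.16 fixed point, so 1 ppb = 2^16 / 1000 = 2^13 / 125 scaled units. A worked instance:

	/*
	 * phase_pull_in_ppb = 1000000 (1 ms spread over 1 s):
	 * scaled_ppm = 1000000 * 2^13 / 125 = 65536000, i.e. 1000 ppm
	 */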
+
+static int do_phase_pull_in_sw(struct idtcm_channel *channel,
+ s32 delta_ns,
+ u32 max_ffo_ppb)
+{
+ s32 current_ppm = channel->current_freq_scaled_ppm;
+ u32 duration_ms = MSEC_PER_SEC;
+ s32 delta_ppm;
+ s32 ppb;
+ int err;
+
+ /* If the ToD correction is less than PHASE_PULL_IN_MIN_THRESHOLD_NS,
+ * skip. The error introduced by the ToD adjustment procedure would
+ * be bigger than the required ToD correction
+ */
+ if (abs(delta_ns) < PHASE_PULL_IN_MIN_THRESHOLD_NS)
+ return 0;
+
+ if (max_ffo_ppb == 0)
+ max_ffo_ppb = PHASE_PULL_IN_MAX_PPB;
+
+	/* For most cases, keep the phase pull-in duration at 1 second */
+ ppb = delta_ns;
+ while (abs(ppb) > max_ffo_ppb) {
+ duration_ms *= 2;
+ ppb /= 2;
+ }
+
+ delta_ppm = phase_pull_in_scaled_ppm(current_ppm, ppb);
+
+ err = _idtcm_adjfine(channel, delta_ppm);
+
+ if (err)
+ return err;
+
+ /* schedule the worker to cancel phase pull-in */
+ ptp_schedule_worker(channel->ptp_clock,
+ msecs_to_jiffies(duration_ms) - 1);
+
+ channel->phase_pull_in = true;
+
+ return 0;
+}
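+
Worked example of the halving loop above, assuming max_ffo_ppb = 100000 and delta_ns = 400000:

	/*
	 * ppb = 400000, duration = 1000 ms  -> over the limit, halve
	 * ppb = 200000, duration = 2000 ms  -> still over, halve
	 * ppb = 100000, duration = 4000 ms  -> ok: 100000 ppb * 4 s = 400000 ns
	 */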
+
+static int initialize_operating_mode_with_manual_reference(struct idtcm_channel *channel,
+ enum manual_reference ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+ channel->configure_write_frequency = configure_manual_reference_write_frequency;
+ channel->configure_write_phase = configure_manual_reference_write_phase;
+ channel->do_phase_pull_in = do_phase_pull_in_sw;
+
+ switch (ref) {
+ case MANU_REF_WRITE_PHASE:
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+ break;
+ case MANU_REF_WRITE_FREQUENCY:
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+ break;
+ default:
+ dev_warn(idtcm->dev,
+ "Unsupported MANUAL_REFERENCE: 0x%02x", ref);
+ }
+
+ return 0;
+}
+
+static int initialize_operating_mode_with_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err = 0;
+
+ channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+ channel->configure_write_frequency = configure_dpll_mode_write_frequency;
+ channel->configure_write_phase = configure_dpll_mode_write_phase;
+ channel->do_phase_pull_in = do_phase_pull_in_fw;
+
+ switch (mode) {
+ case PLL_MODE_WRITE_PHASE:
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+ break;
+ case PLL_MODE_WRITE_FREQUENCY:
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+ break;
+ default:
+ dev_err(idtcm->dev,
+ "Unsupported PLL_MODE: 0x%02x", mode);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int initialize_dco_operating_mode(struct idtcm_channel *channel)
+{
+ enum manual_reference ref = MANU_REF_XO_DPLL;
+ enum pll_mode mode = PLL_MODE_DISABLED;
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+
+ err = idtcm_get_pll_mode(channel, &mode);
+ if (err) {
+ dev_err(idtcm->dev, "Unable to read pll mode!");
+ return err;
+ }
+
+ if (mode == PLL_MODE_PLL) {
+ err = idtcm_get_manual_reference(channel, &ref);
+ if (err) {
+ dev_err(idtcm->dev, "Unable to read manual reference!");
+ return err;
+ }
+ err = initialize_operating_mode_with_manual_reference(channel, ref);
+ } else {
+ err = initialize_operating_mode_with_pll_mode(channel, mode);
+ }
+
+ if (channel->mode == PTP_PLL_MODE_WRITE_PHASE)
+ channel->configure_write_frequency(channel);
+
+ return err;
+}
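+
Net effect of the mode probing above, summarized (a reading aid, not code from the patch):

	/*
	 * PLL_MODE_PLL + manual reference: SW pull-in (do_phase_pull_in_sw),
	 *   write frequency/phase via DPLL_CTRL_DPLL_MANU_REF_CFG
	 * any other pll mode:              FW pull-in (do_phase_pull_in_fw),
	 *   write frequency/phase via DPLL_MODE
	 */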
+
/* PTP Hardware Clock interface */
-/*
+/**
* Maximum absolute value for write phase offset in picoseconds
*
+ * @channel: channel whose write phase is adjusted
+ * @delta_ns: phase offset to apply, in nanoseconds
+ *
 * The destination signed register is a 32-bit register with 50 ps resolution
*
* 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
@@ -1420,8 +1718,8 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
s32 phase_50ps;
s64 offset_ps;
- if (channel->pll_mode != PLL_MODE_WRITE_PHASE) {
- err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+ if (channel->mode != PTP_PLL_MODE_WRITE_PHASE) {
+ err = channel->configure_write_phase(channel);
if (err)
return err;
}
@@ -1459,8 +1757,8 @@ static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
u8 buf[6] = {0};
s64 fcw;
- if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) {
- err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (channel->mode != PTP_PLL_MODE_WRITE_FREQUENCY) {
+ err = channel->configure_write_frequency(channel);
if (err)
return err;
}
@@ -1501,15 +1799,14 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
+ err = _idtcm_gettime_immediate(channel, ts);
+ mutex_unlock(idtcm->lock);
- err = _idtcm_gettime(channel, ts);
if (err)
- dev_err(&idtcm->client->dev, "Failed at line %d in %s!",
+ dev_err(idtcm->dev, "Failed at line %d in %s!",
__LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1520,15 +1817,14 @@ static int idtcm_settime_deprecated(struct ptp_clock_info *ptp,
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_settime_deprecated(channel, ts);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1539,15 +1835,14 @@ static int idtcm_settime(struct ptp_clock_info *ptp,
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1557,15 +1852,14 @@ static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_adjtime_deprecated(channel, delta);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1577,31 +1871,30 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
enum scsr_tod_write_type_sel type;
int err;
- if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
- err = idtcm_do_phase_pull_in(channel, delta, 0);
- if (err)
- dev_err(&idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
- return err;
- }
+ if (channel->phase_pull_in == true)
+ return 0;
- if (delta >= 0) {
- ts = ns_to_timespec64(delta);
- type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
+ mutex_lock(idtcm->lock);
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ err = channel->do_phase_pull_in(channel, delta, 0);
} else {
- ts = ns_to_timespec64(-delta);
- type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
+ if (delta >= 0) {
+ ts = ns_to_timespec64(delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
+ } else {
+ ts = ns_to_timespec64(-delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
+ }
+ err = _idtcm_settime(channel, &ts, type);
}
- mutex_lock(&idtcm->reg_lock);
+ mutex_unlock(idtcm->lock);
- err = _idtcm_settime(channel, &ts, type);
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1611,15 +1904,14 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_adjphase(channel, delta);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1629,14 +1921,21 @@ static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
+ if (channel->phase_pull_in == true)
+ return 0;
+ if (scaled_ppm == channel->current_freq_scaled_ppm)
+ return 0;
+
+ mutex_lock(idtcm->lock);
err = _idtcm_adjfine(channel, scaled_ppm);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
-
- mutex_unlock(&idtcm->reg_lock);
+ else
+ channel->current_freq_scaled_ppm = scaled_ppm;
return err;
}
@@ -1644,249 +1943,36 @@ static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
- int err;
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(idtcm->lock);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- if (!on) {
- err = idtcm_perout_enable(channel, false, &rq->perout);
- if (err)
- dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in %s!",
- __LINE__, __func__);
- return err;
- }
-
+ if (!on)
+ err = idtcm_perout_enable(channel, &rq->perout, false);
/* Only accept a 1-PPS aligned to the second. */
- if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
- rq->perout.period.nsec)
- return -ERANGE;
-
- err = idtcm_perout_enable(channel, true, &rq->perout);
- if (err)
- dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
- return err;
- default:
- break;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int _enable_pll_tod_sync(struct idtcm *idtcm,
- u8 pll,
- u8 sync_src,
- u8 qn,
- u8 qn_plus_1)
-{
- int err;
- u8 val;
- u16 dpll;
- u16 out0 = 0, out1 = 0;
-
- if (qn == 0 && qn_plus_1 == 0)
- return 0;
-
- switch (pll) {
- case 0:
- dpll = DPLL_0;
- if (qn)
- out0 = OUTPUT_0;
- if (qn_plus_1)
- out1 = OUTPUT_1;
+ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ err = -ERANGE;
+ else
+ err = idtcm_perout_enable(channel, &rq->perout, true);
break;
- case 1:
- dpll = DPLL_1;
- if (qn)
- out0 = OUTPUT_2;
- if (qn_plus_1)
- out1 = OUTPUT_3;
- break;
- case 2:
- dpll = DPLL_2;
- if (qn)
- out0 = OUTPUT_4;
- if (qn_plus_1)
- out1 = OUTPUT_5;
- break;
- case 3:
- dpll = DPLL_3;
- if (qn)
- out0 = OUTPUT_6;
- if (qn_plus_1)
- out1 = OUTPUT_7;
- break;
- case 4:
- dpll = DPLL_4;
- if (qn)
- out0 = OUTPUT_8;
- break;
- case 5:
- dpll = DPLL_5;
- if (qn)
- out0 = OUTPUT_9;
- if (qn_plus_1)
- out1 = OUTPUT_8;
- break;
- case 6:
- dpll = DPLL_6;
- if (qn)
- out0 = OUTPUT_10;
- if (qn_plus_1)
- out1 = OUTPUT_11;
- break;
- case 7:
- dpll = DPLL_7;
- if (qn)
- out0 = OUTPUT_11;
+ case PTP_CLK_REQ_EXTTS:
+ err = idtcm_enable_extts(channel, rq->extts.index,
+ rq->extts.rsv[0], on);
break;
default:
- return -EINVAL;
- }
-
- /*
- * Enable OUTPUT OUT_SYNC.
- */
- if (out0) {
- err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
-
- val &= ~OUT_SYNC_DISABLE;
-
- err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
- }
-
- if (out1) {
- err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
-
- val &= ~OUT_SYNC_DISABLE;
-
- err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
- }
-
- /* enable dpll sync tod pps, must be set before dpll_mode */
- err = idtcm_read(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
- if (err)
- return err;
-
- val &= ~(TOD_SYNC_SOURCE_MASK << TOD_SYNC_SOURCE_SHIFT);
- val |= (sync_src << TOD_SYNC_SOURCE_SHIFT);
- val |= TOD_SYNC_EN;
-
- return idtcm_write(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
-}
-
-static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
-{
- struct idtcm *idtcm = channel->idtcm;
- u8 pll;
- u8 sync_src;
- u8 qn;
- u8 qn_plus_1;
- u8 cfg;
- int err = 0;
- u16 output_mask = channel->output_mask;
- u8 out8_mux = 0;
- u8 out11_mux = 0;
- u8 temp;
-
- /*
- * set tod_out_sync_enable to 0.
- */
- err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
- if (err)
- return err;
-
- cfg &= ~TOD_OUT_SYNC_ENABLE;
-
- err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
- if (err)
- return err;
-
- switch (channel->tod_n) {
- case TOD_0:
- sync_src = 0;
- break;
- case TOD_1:
- sync_src = 1;
- break;
- case TOD_2:
- sync_src = 2;
- break;
- case TOD_3:
- sync_src = 3;
break;
- default:
- return -EINVAL;
}
- err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, &temp, sizeof(temp));
- if (err)
- return err;
-
- if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
- Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
- out8_mux = 1;
+ mutex_unlock(idtcm->lock);
- err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, &temp, sizeof(temp));
if (err)
- return err;
-
- if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
- Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
- out11_mux = 1;
-
- for (pll = 0; pll < 8; pll++) {
- qn = 0;
- qn_plus_1 = 0;
-
- if (pll < 4) {
- /* First 4 pll has 2 outputs */
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- qn_plus_1 = output_mask & 0x1;
- output_mask = output_mask >> 1;
- } else if (pll == 4) {
- if (out8_mux == 0) {
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- } else if (pll == 5) {
- if (out8_mux) {
- qn_plus_1 = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- } else if (pll == 6) {
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- if (out11_mux) {
- qn_plus_1 = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- } else if (pll == 7) {
- if (out11_mux == 0) {
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- }
-
- if (qn != 0 || qn_plus_1 != 0)
- err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn,
- qn_plus_1);
- if (err)
- return err;
- }
+ dev_err(channel->idtcm->dev,
+ "Failed in %s with err %d!", __func__, err);
return err;
}
@@ -1895,23 +1981,31 @@ static int idtcm_enable_tod(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts = {0, 0};
+ u16 tod_cfg = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_CFG);
u8 cfg;
int err;
+ /* STEELAI-366 - Temporary workaround for ts2phc compatibility */
+ if (0) {
+ err = idtcm_output_mask_enable(channel, false);
+ if (err)
+ return err;
+ }
+
/*
* Start the TOD clock ticking.
*/
- err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ err = idtcm_read(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
if (err)
return err;
cfg |= TOD_ENABLE;
- err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ err = idtcm_write(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
if (err)
return err;
- if (idtcm->deprecated)
+ if (idtcm->fw_ver < V487)
return _idtcm_settime_deprecated(channel, &ts);
else
return _idtcm_settime(channel, &ts,
@@ -1939,12 +2033,9 @@ static void idtcm_set_version_info(struct idtcm *idtcm)
snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u",
major, minor, hotfix);
- if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0)
- idtcm->deprecated = 0;
- else
- idtcm->deprecated = 1;
+ idtcm->fw_ver = idtcm_fw_version(idtcm->version);
- dev_info(&idtcm->client->dev,
+ dev_info(idtcm->dev,
"%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d",
major, minor, hotfix,
product_id, hw_rev_id, config_select);
@@ -1954,28 +2045,33 @@ static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
+ .n_ext_ts = MAX_TOD,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime,
.enable = &idtcm_enable,
+ .do_aux_work = &idtcm_work_handler,
};
static const struct ptp_clock_info idtcm_caps_deprecated = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
+ .n_ext_ts = MAX_TOD,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime_deprecated,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime_deprecated,
.enable = &idtcm_enable,
+ .do_aux_work = &idtcm_work_handler,
};
static int configure_channel_pll(struct idtcm_channel *channel)
{
+ struct idtcm *idtcm = channel->idtcm;
int err = 0;
switch (channel->pll) {
@@ -1997,7 +2093,7 @@ static int configure_channel_pll(struct idtcm_channel *channel)
break;
case 2:
channel->dpll_freq = DPLL_FREQ_2;
- channel->dpll_n = DPLL_2;
+ channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_2);
channel->hw_dpll_n = HW_DPLL_2;
channel->dpll_phase = DPLL_PHASE_2;
channel->dpll_ctrl_n = DPLL_CTRL_2;
@@ -2013,7 +2109,7 @@ static int configure_channel_pll(struct idtcm_channel *channel)
break;
case 4:
channel->dpll_freq = DPLL_FREQ_4;
- channel->dpll_n = DPLL_4;
+ channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_4);
channel->hw_dpll_n = HW_DPLL_4;
channel->dpll_phase = DPLL_PHASE_4;
channel->dpll_ctrl_n = DPLL_CTRL_4;
@@ -2029,7 +2125,7 @@ static int configure_channel_pll(struct idtcm_channel *channel)
break;
case 6:
channel->dpll_freq = DPLL_FREQ_6;
- channel->dpll_n = DPLL_6;
+ channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_6);
channel->hw_dpll_n = HW_DPLL_6;
channel->dpll_phase = DPLL_PHASE_6;
channel->dpll_ctrl_n = DPLL_CTRL_6;
@@ -2050,50 +2146,104 @@ static int configure_channel_pll(struct idtcm_channel *channel)
return err;
}
-static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+/*
+ * Compensate for the PTP DCO input-to-output delay.
+ * This delay is 18 FOD cycles.
+ */
+static u32 idtcm_get_dco_delay(struct idtcm_channel *channel)
{
- struct idtcm_channel *channel;
+ struct idtcm *idtcm = channel->idtcm;
+ u8 mbuf[8] = {0};
+ u8 nbuf[2] = {0};
+ u32 fodFreq;
int err;
+ u64 m;
+ u16 n;
- if (!(index < MAX_TOD))
- return -EINVAL;
-
- channel = &idtcm->channel[index];
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_FOD_FREQ, mbuf, 6);
+ if (err)
+ return 0;
- /* Set pll addresses */
- err = configure_channel_pll(channel);
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_FOD_FREQ + 6, nbuf, 2);
if (err)
- return err;
+ return 0;
+
+ m = get_unaligned_le64(mbuf);
+ n = get_unaligned_le16(nbuf);
+
+ if (n == 0)
+ n = 1;
+
+ fodFreq = (u32)div_u64(m, n);
+ if (fodFreq >= 500000000)
+ return 18 * (u32)div_u64(NSEC_PER_SEC, fodFreq);
+
+ return 0;
+}
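
Worked example of the formula idtcm_get_dco_delay() implements: the FOD frequency is the numerator m divided by the denominator n read from DPLL_CTRL_DPLL_FOD_FREQ, and the compensation is 18 FOD periods expressed in nanoseconds, applied only at 500 MHz and above. Standalone arithmetic only:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t dco_delay_ns(uint64_t m, uint16_t n)
{
	uint32_t fod_freq;

	if (n == 0)
		n = 1;			/* same divide-by-zero guard */
	fod_freq = (uint32_t)(m / n);
	if (fod_freq >= 500000000)	/* compensate fast FODs only */
		return 18 * (uint32_t)(NSEC_PER_SEC / fod_freq);
	return 0;
}

int main(void)
{
	/* 1 GHz FOD: 18 cycles of 1 ns -> 18 ns */
	printf("%u ns\n", (unsigned)dco_delay_ns(1000000000ULL, 1));
	/* 500 MHz FOD: 18 cycles of 2 ns -> 36 ns */
	printf("%u ns\n", (unsigned)dco_delay_ns(500000000ULL, 1));
	return 0;
}
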
+
+static int configure_channel_tod(struct idtcm_channel *channel, u32 index)
+{
+ enum fw_version fw_ver = channel->idtcm->fw_ver;
/* Set tod addresses */
switch (index) {
case 0:
- channel->tod_read_primary = TOD_READ_PRIMARY_0;
- channel->tod_write = TOD_WRITE_0;
- channel->tod_n = TOD_0;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_0);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_0);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_0);
+ channel->sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
break;
case 1:
- channel->tod_read_primary = TOD_READ_PRIMARY_1;
- channel->tod_write = TOD_WRITE_1;
- channel->tod_n = TOD_1;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_1);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_1);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_1);
+ channel->sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
break;
case 2:
- channel->tod_read_primary = TOD_READ_PRIMARY_2;
- channel->tod_write = TOD_WRITE_2;
- channel->tod_n = TOD_2;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_2);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_2);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_2);
+ channel->sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
break;
case 3:
- channel->tod_read_primary = TOD_READ_PRIMARY_3;
- channel->tod_write = TOD_WRITE_3;
- channel->tod_n = TOD_3;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_3);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_3);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_3);
+ channel->sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (index >= MAX_TOD)
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
channel->idtcm = idtcm;
+ channel->current_freq_scaled_ppm = 0;
+
+ /* Set pll addresses */
+ err = configure_channel_pll(channel);
+ if (err)
+ return err;
- if (idtcm->deprecated)
+ /* Set tod addresses */
+ err = configure_channel_tod(channel, index);
+ if (err)
+ return err;
+
+ if (idtcm->fw_ver < V487)
channel->caps = idtcm_caps_deprecated;
else
channel->caps = idtcm_caps;
@@ -2101,30 +2251,19 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
snprintf(channel->caps.name, sizeof(channel->caps.name),
"IDT CM TOD%u", index);
- if (!idtcm->deprecated) {
- err = idtcm_enable_tod_sync(channel);
- if (err) {
- dev_err(&idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
- return err;
- }
- }
-
- /* Sync pll mode with hardware */
- err = idtcm_get_pll_mode(channel, &channel->pll_mode);
- if (err) {
- dev_err(&idtcm->client->dev,
- "Error: %s - Unable to read pll mode", __func__);
+ err = initialize_dco_operating_mode(channel);
+ if (err)
return err;
- }
err = idtcm_enable_tod(channel);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
+ channel->dco_delay = idtcm_get_dco_delay(channel);
+
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
if (IS_ERR(channel->ptp_clock)) {
@@ -2136,12 +2275,59 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
- dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d",
+ dev_info(idtcm->dev, "PLL%d registered as ptp%d",
index, channel->ptp_clock->index);
return 0;
}
+static int idtcm_enable_extts_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (index >= MAX_TOD)
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+ channel->idtcm = idtcm;
+
+ /* Set tod addresses */
+ err = configure_channel_tod(channel, index);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void idtcm_extts_check(struct work_struct *work)
+{
+ struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work);
+ int err, i;
+
+ if (idtcm->extts_mask == 0)
+ return;
+
+ mutex_lock(idtcm->lock);
+ for (i = 0; i < MAX_TOD; i++) {
+ u8 mask = 1 << i;
+
+ if (idtcm->extts_mask & mask) {
+ err = idtcm_extts_check_channel(idtcm, i);
+ /* trigger clears itself, so clear the mask */
+ if (err == 0)
+ idtcm->extts_mask &= ~mask;
+ }
+ }
+
+ if (idtcm->extts_mask)
+ schedule_delayed_work(&idtcm->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+ mutex_unlock(idtcm->lock);
+}
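
A userspace model of the polling loop in idtcm_extts_check(): each set bit in extts_mask is one armed TOD, a successful check means the hardware trigger has self-cleared so the bit is dropped, and the delayed work is rescheduled only while bits remain. check_channel() below is a stand-in for idtcm_extts_check_channel():

#include <stdio.h>

#define MAX_TOD 4

/* stand-in for idtcm_extts_check_channel(): 0 = trigger consumed */
static int check_channel(int tod)
{
	(void)tod;
	return 0;	/* pretend every armed trigger has fired */
}

int main(void)
{
	unsigned int extts_mask = 0x0b;	/* TOD0, TOD1, TOD3 armed */
	int i;

	for (i = 0; i < MAX_TOD; i++) {
		unsigned int mask = 1u << i;

		if ((extts_mask & mask) && check_channel(i) == 0)
			extts_mask &= ~mask;	/* trigger self-clears */
	}
	printf("remaining mask: 0x%02x\n", extts_mask);	/* 0x00 */
	if (extts_mask)
		printf("reschedule after EXTTS_PERIOD_MS\n");
	return 0;
}
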
+
static void ptp_clock_unregister_all(struct idtcm *idtcm)
{
u8 i;
@@ -2149,7 +2335,6 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
for (i = 0; i < MAX_TOD; i++) {
channel = &idtcm->channel[i];
-
if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
}
@@ -2158,6 +2343,7 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
static void set_default_masks(struct idtcm *idtcm)
{
idtcm->tod_mask = DEFAULT_TOD_MASK;
+ idtcm->extts_mask = 0;
idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL;
idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL;
@@ -2170,158 +2356,86 @@ static void set_default_masks(struct idtcm *idtcm)
idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
}
-static int idtcm_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int idtcm_probe(struct platform_device *pdev)
{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
struct idtcm *idtcm;
int err;
u8 i;
- /* Unused for now */
- (void)id;
-
- idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+ idtcm = devm_kzalloc(&pdev->dev, sizeof(struct idtcm), GFP_KERNEL);
if (!idtcm)
return -ENOMEM;
- idtcm->client = client;
- idtcm->page_offset = 0xff;
+ idtcm->dev = &pdev->dev;
+ idtcm->mfd = pdev->dev.parent;
+ idtcm->lock = &ddata->lock;
+ idtcm->regmap = ddata->regmap;
idtcm->calculate_overhead_flag = 0;
+ INIT_DELAYED_WORK(&idtcm->extts_work, idtcm_extts_check);
+
set_default_masks(idtcm);
- mutex_init(&idtcm->reg_lock);
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
idtcm_set_version_info(idtcm);
- err = idtcm_load_firmware(idtcm, &client->dev);
+ err = idtcm_load_firmware(idtcm, &pdev->dev);
+
if (err)
- dev_warn(&idtcm->client->dev, "loading firmware failed with %d", err);
+ dev_warn(idtcm->dev, "loading firmware failed with %d", err);
wait_for_chip_ready(idtcm);
if (idtcm->tod_mask) {
for (i = 0; i < MAX_TOD; i++) {
- if (idtcm->tod_mask & (1 << i)) {
+ if (idtcm->tod_mask & (1 << i))
err = idtcm_enable_channel(idtcm, i);
- if (err) {
- dev_err(&idtcm->client->dev,
- "idtcm_enable_channel %d failed!", i);
- break;
- }
+ else
+ err = idtcm_enable_extts_channel(idtcm, i);
+ if (err) {
+ dev_err(idtcm->dev,
+ "idtcm_enable_channel %d failed!", i);
+ break;
}
}
} else {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"no PLLs flagged as PHCs, nothing to do");
err = -ENODEV;
}
- mutex_unlock(&idtcm->reg_lock);
+ mutex_unlock(idtcm->lock);
if (err) {
ptp_clock_unregister_all(idtcm);
return err;
}
- i2c_set_clientdata(client, idtcm);
+ platform_set_drvdata(pdev, idtcm);
return 0;
}
-static int idtcm_remove(struct i2c_client *client)
+static int idtcm_remove(struct platform_device *pdev)
{
- struct idtcm *idtcm = i2c_get_clientdata(client);
+ struct idtcm *idtcm = platform_get_drvdata(pdev);
ptp_clock_unregister_all(idtcm);
- mutex_destroy(&idtcm->reg_lock);
+ cancel_delayed_work_sync(&idtcm->extts_work);
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id idtcm_dt_id[] = {
- { .compatible = "idt,8a34000" },
- { .compatible = "idt,8a34001" },
- { .compatible = "idt,8a34002" },
- { .compatible = "idt,8a34003" },
- { .compatible = "idt,8a34004" },
- { .compatible = "idt,8a34005" },
- { .compatible = "idt,8a34006" },
- { .compatible = "idt,8a34007" },
- { .compatible = "idt,8a34008" },
- { .compatible = "idt,8a34009" },
- { .compatible = "idt,8a34010" },
- { .compatible = "idt,8a34011" },
- { .compatible = "idt,8a34012" },
- { .compatible = "idt,8a34013" },
- { .compatible = "idt,8a34014" },
- { .compatible = "idt,8a34015" },
- { .compatible = "idt,8a34016" },
- { .compatible = "idt,8a34017" },
- { .compatible = "idt,8a34018" },
- { .compatible = "idt,8a34019" },
- { .compatible = "idt,8a34040" },
- { .compatible = "idt,8a34041" },
- { .compatible = "idt,8a34042" },
- { .compatible = "idt,8a34043" },
- { .compatible = "idt,8a34044" },
- { .compatible = "idt,8a34045" },
- { .compatible = "idt,8a34046" },
- { .compatible = "idt,8a34047" },
- { .compatible = "idt,8a34048" },
- { .compatible = "idt,8a34049" },
- {},
-};
-MODULE_DEVICE_TABLE(of, idtcm_dt_id);
-#endif
-
-static const struct i2c_device_id idtcm_i2c_id[] = {
- { "8a34000" },
- { "8a34001" },
- { "8a34002" },
- { "8a34003" },
- { "8a34004" },
- { "8a34005" },
- { "8a34006" },
- { "8a34007" },
- { "8a34008" },
- { "8a34009" },
- { "8a34010" },
- { "8a34011" },
- { "8a34012" },
- { "8a34013" },
- { "8a34014" },
- { "8a34015" },
- { "8a34016" },
- { "8a34017" },
- { "8a34018" },
- { "8a34019" },
- { "8a34040" },
- { "8a34041" },
- { "8a34042" },
- { "8a34043" },
- { "8a34044" },
- { "8a34045" },
- { "8a34046" },
- { "8a34047" },
- { "8a34048" },
- { "8a34049" },
- {},
-};
-MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
-
-static struct i2c_driver idtcm_driver = {
+static struct platform_driver idtcm_driver = {
.driver = {
- .of_match_table = of_match_ptr(idtcm_dt_id),
- .name = "idtcm",
+ .name = "8a3400x-phc",
},
- .probe = idtcm_probe,
- .remove = idtcm_remove,
- .id_table = idtcm_i2c_id,
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
};
-module_i2c_driver(idtcm_driver);
+module_platform_driver(idtcm_driver);
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index fb323271063e..0f3059ae1fff 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -9,8 +9,8 @@
#define PTP_IDTCLOCKMATRIX_H
#include <linux/ktime.h>
-
-#include "idt8a340_reg.h"
+#include <linux/mfd/idt8a340_reg.h>
+#include <linux/regmap.h>
#define FW_FILENAME "idtcm.bin"
#define MAX_TOD (4)
@@ -44,7 +44,6 @@
#define DEFAULT_TOD2_PTP_PLL (2)
#define DEFAULT_TOD3_PTP_PLL (3)
-#define POST_SM_RESET_DELAY_MS (3000)
#define PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED (150000)
#define PHASE_PULL_IN_THRESHOLD_NS (15000)
#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
@@ -57,66 +56,26 @@
#define IDTCM_MAX_WRITE_COUNT (512)
-#define FULL_FW_CFG_BYTES (SCRATCH - GPIO_USER_CONTROL)
-#define FULL_FW_CFG_SKIPPED_BYTES (((SCRATCH >> 7) \
- - (GPIO_USER_CONTROL >> 7)) \
- * 4) /* 4 bytes skipped every 0x80 */
-
-/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
-enum pll_mode {
- PLL_MODE_MIN = 0,
- PLL_MODE_NORMAL = PLL_MODE_MIN,
- PLL_MODE_WRITE_PHASE = 1,
- PLL_MODE_WRITE_FREQUENCY = 2,
- PLL_MODE_GPIO_INC_DEC = 3,
- PLL_MODE_SYNTHESIS = 4,
- PLL_MODE_PHASE_MEASUREMENT = 5,
- PLL_MODE_DISABLED = 6,
- PLL_MODE_MAX = PLL_MODE_DISABLED,
-};
-
-enum hw_tod_write_trig_sel {
- HW_TOD_WR_TRIG_SEL_MIN = 0,
- HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
- HW_TOD_WR_TRIG_SEL_RESERVED = 1,
- HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
- HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
- HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
- HW_TOD_WR_TRIG_SEL_GPIO = 5,
- HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
- WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
-};
-
-/* 4.8.7 only */
-enum scsr_tod_write_trig_sel {
- SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
- SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
- SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
- SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
- SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
- SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
- SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
- SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
-};
+#define PHASE_PULL_IN_MAX_PPB (144000)
+#define PHASE_PULL_IN_MIN_THRESHOLD_NS (2)
-/* 4.8.7 only */
-enum scsr_tod_write_type_sel {
- SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
- SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
- SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
- SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+/*
+ * Return the register address appropriate for the passed-in firmware version.
+ */
+#define IDTCM_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ V487 = 1,
+ V520 = 2,
};
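
The IDTCM_FW_REG() macro relies on token pasting: once the running firmware is at least VER, the REG argument resolves to the REG_VER variant of the constant. A standalone demonstration — the register addresses below are made up for the demo:

#include <stdio.h>

enum fw_version { V_DEFAULT = 0, V487 = 1, V520 = 2 };

#define TOD_0		0x1000	/* hypothetical pre-5.2.0 address */
#define TOD_0_V520	0x2000	/* hypothetical 5.2.0 address */

#define IDTCM_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))

int main(void)
{
	printf("0x%x\n", IDTCM_FW_REG(V487, V520, TOD_0));	/* 0x1000 */
	printf("0x%x\n", IDTCM_FW_REG(V520, V520, TOD_0));	/* 0x2000 */
	return 0;
}
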
-/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
-enum dpll_state {
- DPLL_STATE_MIN = 0,
- DPLL_STATE_FREERUN = DPLL_STATE_MIN,
- DPLL_STATE_LOCKACQ = 1,
- DPLL_STATE_LOCKREC = 2,
- DPLL_STATE_LOCKED = 3,
- DPLL_STATE_HOLDOVER = 4,
- DPLL_STATE_OPEN_LOOP = 5,
- DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+/* PTP PLL Mode */
+enum ptp_pll_mode {
+ PTP_PLL_MODE_MIN = 0,
+ PTP_PLL_MODE_WRITE_FREQUENCY = PTP_PLL_MODE_MIN,
+ PTP_PLL_MODE_WRITE_PHASE,
+ PTP_PLL_MODE_UNSUPPORTED,
+ PTP_PLL_MODE_MAX = PTP_PLL_MODE_UNSUPPORTED,
};
struct idtcm;
@@ -134,26 +93,40 @@ struct idtcm_channel {
u16 tod_write;
u16 tod_n;
u16 hw_dpll_n;
- enum pll_mode pll_mode;
+ u8 sync_src;
+ enum ptp_pll_mode mode;
+ int (*configure_write_frequency)(struct idtcm_channel *channel);
+ int (*configure_write_phase)(struct idtcm_channel *channel);
+ int (*do_phase_pull_in)(struct idtcm_channel *channel,
+ s32 offset_ns, u32 max_ffo_ppb);
+ s32 current_freq_scaled_ppm;
+ bool phase_pull_in;
+ u32 dco_delay;
+ /* last input trigger for extts */
+ u8 refn;
u8 pll;
u16 output_mask;
};
struct idtcm {
struct idtcm_channel channel[MAX_TOD];
- struct i2c_client *client;
- u8 page_offset;
+ struct device *dev;
u8 tod_mask;
char version[16];
- u8 deprecated;
-
+ enum fw_version fw_ver;
+ /* TODs being polled for external timestamps */
+ u8 extts_mask;
+ struct delayed_work extts_work;
+ /* PTP channel that reports extts events for each TOD */
+ struct idtcm_channel *event_channel[MAX_TOD];
+ /* Mutex shared with the rsmu MFD driver; serializes register access */
+ struct mutex *lock;
+ struct device *mfd;
+ struct regmap *regmap;
/* Overhead calculation for adjtime */
u8 calculate_overhead_flag;
s64 tod_write_overhead_ns;
ktime_t start_time;
-
- /* Protects I2C read/modify/write registers from concurrent access */
- struct mutex reg_lock;
};
struct idtcm_fwrc {
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index caf9b37c5eb1..34f943c8c9fd 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/serial_8250.h>
@@ -72,7 +73,7 @@ struct tod_reg {
u32 status;
u32 uart_polarity;
u32 version;
- u32 correction_sec;
+ u32 adj_sec;
u32 __pad0[3];
u32 uart_baud;
u32 __pad1[3];
@@ -124,6 +125,55 @@ struct img_reg {
u32 version;
};
+struct gpio_reg {
+ u32 gpio1;
+ u32 __pad0;
+ u32 gpio2;
+ u32 __pad1;
+};
+
+struct irig_master_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+ u32 mode_ctrl;
+};
+
+#define IRIG_M_CTRL_ENABLE BIT(0)
+
+struct irig_slave_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+ u32 mode_ctrl;
+};
+
+#define IRIG_S_CTRL_ENABLE BIT(0)
+
+struct dcf_master_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+};
+
+#define DCF_M_CTRL_ENABLE BIT(0)
+
+struct dcf_slave_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+};
+
+#define DCF_S_CTRL_ENABLE BIT(0)
+
struct ptp_ocp_flash_info {
const char *name;
int pci_offset;
@@ -131,11 +181,17 @@ struct ptp_ocp_flash_info {
void *data;
};
-struct ptp_ocp_ext_info {
+struct ptp_ocp_i2c_info {
const char *name;
+ unsigned long fixed_rate;
+ size_t data_size;
+ void *data;
+};
+
+struct ptp_ocp_ext_info {
int index;
irqreturn_t (*irq_fcn)(int irq, void *priv);
- int (*enable)(void *priv, bool enable);
+ int (*enable)(void *priv, u32 req, bool enable);
};
struct ptp_ocp_ext_src {
@@ -153,9 +209,17 @@ struct ptp_ocp {
struct tod_reg __iomem *tod;
struct pps_reg __iomem *pps_to_ext;
struct pps_reg __iomem *pps_to_clk;
+ struct gpio_reg __iomem *pps_select;
+ struct gpio_reg __iomem *sma;
+ struct irig_master_reg __iomem *irig_out;
+ struct irig_slave_reg __iomem *irig_in;
+ struct dcf_master_reg __iomem *dcf_out;
+ struct dcf_slave_reg __iomem *dcf_in;
+ struct tod_reg __iomem *nmea_out;
struct ptp_ocp_ext_src *pps;
struct ptp_ocp_ext_src *ts0;
struct ptp_ocp_ext_src *ts1;
+ struct ptp_ocp_ext_src *ts2;
struct img_reg __iomem *image;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
@@ -163,16 +227,25 @@ struct ptp_ocp {
struct platform_device *spi_flash;
struct clk_hw *i2c_clk;
struct timer_list watchdog;
+ struct dentry *debug_root;
time64_t gnss_lost;
int id;
int n_irqs;
int gnss_port;
+ int gnss2_port;
int mac_port; /* miniature atomic clock */
+ int nmea_port;
u8 serial[6];
- int flash_start;
bool has_serial;
+ u32 pps_req_map;
+ int flash_start;
+ u32 utc_tai_offset;
+ u32 ts_window_adjust;
};
+#define OCP_REQ_TIMESTAMP BIT(0)
+#define OCP_REQ_PPS BIT(1)
+
struct ocp_resource {
unsigned long offset;
int size;
@@ -180,6 +253,7 @@ struct ocp_resource {
int (*setup)(struct ptp_ocp *bp, struct ocp_resource *r);
void *extra;
unsigned long bp_offset;
+ const char * const name;
};
static int ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r);
@@ -189,7 +263,7 @@ static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv);
-static int ptp_ocp_ts_enable(void *priv, bool enable);
+static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
#define bp_assign_entry(bp, res, val) ({ \
uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \
@@ -197,7 +271,7 @@ static int ptp_ocp_ts_enable(void *priv, bool enable);
})
#define OCP_RES_LOCATION(member) \
- .bp_offset = offsetof(struct ptp_ocp, member)
+ .name = #member, .bp_offset = offsetof(struct ptp_ocp, member)
#define OCP_MEM_RESOURCE(member) \
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_mem
@@ -215,16 +289,17 @@ static int ptp_ocp_ts_enable(void *priv, bool enable);
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext
/* This is the MSI vector mapping used.
- * 0: N/C
+ * 0: TS3 (and PPS)
* 1: TS0
* 2: TS1
- * 3: GPS
- * 4: GPS2 (n/c)
+ * 3: GNSS
+ * 4: GNSS2
* 5: MAC
- * 6: SPI IMU (inertial measurement unit)
- * 7: I2C oscillator
- * 8: HWICAP
+ * 6: TS2
+ * 7: I2C controller
+ * 8: HWICAP (not used)
* 9: SPI Flash
+ * 10: NMEA
*/
static struct ocp_resource ocp_fb_resource[] = {
@@ -236,7 +311,7 @@ static struct ocp_resource ocp_fb_resource[] = {
OCP_EXT_RESOURCE(ts0),
.offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
.extra = &(struct ptp_ocp_ext_info) {
- .name = "ts0", .index = 0,
+ .index = 0,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
@@ -245,7 +320,25 @@ static struct ocp_resource ocp_fb_resource[] = {
OCP_EXT_RESOURCE(ts1),
.offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
.extra = &(struct ptp_ocp_ext_info) {
- .name = "ts1", .index = 1,
+ .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts2),
+ .offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(pps),
+ .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 3,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
@@ -263,22 +356,62 @@ static struct ocp_resource ocp_fb_resource[] = {
.offset = 0x01050000, .size = 0x10000,
},
{
+ OCP_MEM_RESOURCE(irig_in),
+ .offset = 0x01070000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(irig_out),
+ .offset = 0x01080000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(dcf_in),
+ .offset = 0x01090000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(dcf_out),
+ .offset = 0x010A0000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(nmea_out),
+ .offset = 0x010B0000, .size = 0x10000,
+ },
+ {
OCP_MEM_RESOURCE(image),
.offset = 0x00020000, .size = 0x1000,
},
{
+ OCP_MEM_RESOURCE(pps_select),
+ .offset = 0x00130000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma),
+ .offset = 0x00140000, .size = 0x1000,
+ },
+ {
OCP_I2C_RESOURCE(i2c_ctrl),
.offset = 0x00150000, .size = 0x10000, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_i2c_info) {
+ .name = "xiic-i2c",
+ .fixed_rate = 50000000,
+ },
},
{
OCP_SERIAL_RESOURCE(gnss_port),
.offset = 0x00160000 + 0x1000, .irq_vec = 3,
},
{
+ OCP_SERIAL_RESOURCE(gnss2_port),
+ .offset = 0x00170000 + 0x1000, .irq_vec = 4,
+ },
+ {
OCP_SERIAL_RESOURCE(mac_port),
.offset = 0x00180000 + 0x1000, .irq_vec = 5,
},
{
+ OCP_SERIAL_RESOURCE(nmea_port),
+ .offset = 0x00190000 + 0x1000, .irq_vec = 10,
+ },
+ {
OCP_SPI_RESOURCE(spi_flash),
.offset = 0x00310000, .size = 0x10000, .irq_vec = 9,
.extra = &(struct ptp_ocp_flash_info) {
@@ -309,10 +442,12 @@ MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
static DEFINE_MUTEX(ptp_ocp_lock);
static DEFINE_IDR(ptp_ocp_idr);
-static struct {
+struct ocp_selector {
const char *name;
int value;
-} ptp_ocp_clock[] = {
+};
+
+static struct ocp_selector ptp_ocp_clock[] = {
{ .name = "NONE", .value = 0 },
{ .name = "TOD", .value = 1 },
{ .name = "IRIG", .value = 2 },
@@ -322,33 +457,71 @@ static struct {
{ .name = "DCF", .value = 6 },
{ .name = "REGS", .value = 0xfe },
{ .name = "EXT", .value = 0xff },
+ { }
+};
+
+static struct ocp_selector ptp_ocp_sma_in[] = {
+ { .name = "10Mhz", .value = 0x00 },
+ { .name = "PPS1", .value = 0x01 },
+ { .name = "PPS2", .value = 0x02 },
+ { .name = "TS1", .value = 0x04 },
+ { .name = "TS2", .value = 0x08 },
+ { .name = "IRIG", .value = 0x10 },
+ { .name = "DCF", .value = 0x20 },
+ { }
+};
+
+static struct ocp_selector ptp_ocp_sma_out[] = {
+ { .name = "10Mhz", .value = 0x00 },
+ { .name = "PHC", .value = 0x01 },
+ { .name = "MAC", .value = 0x02 },
+ { .name = "GNSS", .value = 0x04 },
+ { .name = "GNSS2", .value = 0x08 },
+ { .name = "IRIG", .value = 0x10 },
+ { .name = "DCF", .value = 0x20 },
+ { }
};
static const char *
-ptp_ocp_clock_name_from_val(int val)
+ptp_ocp_select_name_from_val(struct ocp_selector *tbl, int val)
{
int i;
- for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++)
- if (ptp_ocp_clock[i].value == val)
- return ptp_ocp_clock[i].name;
+ for (i = 0; tbl[i].name; i++)
+ if (tbl[i].value == val)
+ return tbl[i].name;
return NULL;
}
static int
-ptp_ocp_clock_val_from_name(const char *name)
+ptp_ocp_select_val_from_name(struct ocp_selector *tbl, const char *name)
{
- const char *clk;
+ const char *select;
int i;
- for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
- clk = ptp_ocp_clock[i].name;
- if (!strncasecmp(name, clk, strlen(clk)))
- return ptp_ocp_clock[i].value;
+ for (i = 0; tbl[i].name; i++) {
+ select = tbl[i].name;
+ if (!strncasecmp(name, select, strlen(select)))
+ return tbl[i].value;
}
return -EINVAL;
}
+static ssize_t
+ptp_ocp_select_table_show(struct ocp_selector *tbl, char *buf)
+{
+ ssize_t count;
+ int i;
+
+ count = 0;
+ for (i = 0; tbl[i].name; i++)
+ count += sysfs_emit_at(buf, count, "%s ", tbl[i].name);
+ if (count)
+ count--;
+ count += sysfs_emit_at(buf, count, "\n");
+ return count;
+}
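
The ocp_selector tables introduced here are NULL-terminated, so a single table drives all three lookups: value-to-name, name-to-value, and the sysfs "available" listing. A compact userspace sketch of the first two, with an abbreviated table:

#include <stdio.h>
#include <strings.h>

struct ocp_selector { const char *name; int value; };

static struct ocp_selector clocks[] = {
	{ "NONE", 0 }, { "TOD", 1 }, { "IRIG", 2 }, { NULL, 0 }
};

static const char *name_from_val(struct ocp_selector *tbl, int val)
{
	for (int i = 0; tbl[i].name; i++)
		if (tbl[i].value == val)
			return tbl[i].name;
	return NULL;
}

static int val_from_name(struct ocp_selector *tbl, const char *name)
{
	for (int i = 0; tbl[i].name; i++)
		if (!strcasecmp(name, tbl[i].name))
			return tbl[i].value;
	return -1;
}

int main(void)
{
	printf("%s\n", name_from_val(clocks, 2));	/* IRIG */
	printf("%d\n", val_from_name(clocks, "tod"));	/* 1 */
	return 0;
}
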
+
static int
__ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
@@ -356,10 +529,9 @@ __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
u32 ctrl, time_sec, time_ns;
int i;
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_READ_TIME_REQ;
-
ptp_read_system_prets(sts);
+
+ ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
for (i = 0; i < 100; i++) {
@@ -369,6 +541,12 @@ __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
}
ptp_read_system_postts(sts);
+ if (sts && bp->ts_window_adjust) {
+ s64 ns = timespec64_to_ns(&sts->post_ts);
+
+ sts->post_ts = ns_to_timespec64(ns - bp->ts_window_adjust);
+ }
+
time_ns = ioread32(&bp->reg->time_ns);
time_sec = ioread32(&bp->reg->time_sec);
@@ -408,8 +586,7 @@ __ptp_ocp_settime_locked(struct ptp_ocp *bp, const struct timespec64 *ts)
iowrite32(time_ns, &bp->reg->adjust_ns);
iowrite32(time_sec, &bp->reg->adjust_sec);
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_ADJUST_TIME;
+ ctrl = OCP_CTRL_ADJUST_TIME | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* restore clock selection */
@@ -422,9 +599,6 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
unsigned long flags;
- if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC)
- return 0;
-
spin_lock_irqsave(&bp->lock, flags);
__ptp_ocp_settime_locked(bp, ts);
spin_unlock_irqrestore(&bp->lock, flags);
@@ -432,26 +606,39 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
return 0;
}
+static void
+__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
+{
+ u32 select, ctrl;
+
+ select = ioread32(&bp->reg->select);
+ iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
+
+ iowrite32(adj_val, &bp->reg->offset_ns);
+ iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns);
+
+ ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
+ iowrite32(ctrl, &bp->reg->ctrl);
+
+ /* restore clock selection */
+ iowrite32(select >> 16, &bp->reg->select);
+}
+
static int
ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
- struct timespec64 ts;
unsigned long flags;
- int err;
+ u32 adj_ns, sign;
- if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC)
- return 0;
+ sign = delta_ns < 0 ? BIT(31) : 0;
+ adj_ns = sign ? -delta_ns : delta_ns;
spin_lock_irqsave(&bp->lock, flags);
- err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
- if (likely(!err)) {
- timespec64_add_ns(&ts, delta_ns);
- __ptp_ocp_settime_locked(bp, &ts);
- }
+ __ptp_ocp_adjtime_locked(bp, sign | adj_ns);
spin_unlock_irqrestore(&bp->lock, flags);
- return err;
+ return 0;
}
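
The adjust register takes a sign-magnitude value rather than two's complement: bit 31 carries the sign and the low bits carry |delta| in nanoseconds. A quick standalone check of the encoding used above:

#include <inttypes.h>
#include <stdio.h>

static uint32_t encode_adj(int64_t delta_ns)
{
	uint32_t sign = delta_ns < 0 ? (1U << 31) : 0;
	uint32_t adj_ns = sign ? -delta_ns : delta_ns;

	return sign | adj_ns;
}

int main(void)
{
	printf("%08" PRIx32 "\n", encode_adj(500));	/* 000001f4 */
	printf("%08" PRIx32 "\n", encode_adj(-500));	/* 800001f4 */
	return 0;
}
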
static int
@@ -464,7 +651,7 @@ ptp_ocp_null_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
}
static int
-ptp_ocp_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
+ptp_ocp_null_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
{
return -EOPNOTSUPP;
}
@@ -475,10 +662,12 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
struct ptp_ocp_ext_src *ext = NULL;
+ u32 req;
int err;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ req = OCP_REQ_TIMESTAMP;
switch (rq->extts.index) {
case 0:
ext = bp->ts0;
@@ -486,18 +675,33 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
case 1:
ext = bp->ts1;
break;
+ case 2:
+ ext = bp->ts2;
+ break;
+ case 3:
+ ext = bp->pps;
+ break;
}
break;
case PTP_CLK_REQ_PPS:
+ req = OCP_REQ_PPS;
ext = bp->pps;
break;
+ case PTP_CLK_REQ_PEROUT:
+ if (on &&
+ (rq->perout.period.sec != 1 || rq->perout.period.nsec != 0))
+ return -EINVAL;
+ /* This is a request for 1PPS on an output SMA.
+ * Allow, but assume manual configuration.
+ */
+ return 0;
default:
return -EOPNOTSUPP;
}
err = -ENXIO;
if (ext)
- err = ext->info->enable(ext, on);
+ err = ext->info->enable(ext, req, on);
return err;
}
@@ -510,10 +714,11 @@ static const struct ptp_clock_info ptp_ocp_clock_info = {
.settime64 = ptp_ocp_settime,
.adjtime = ptp_ocp_adjtime,
.adjfine = ptp_ocp_null_adjfine,
- .adjphase = ptp_ocp_adjphase,
+ .adjphase = ptp_ocp_null_adjphase,
.enable = ptp_ocp_enable,
.pps = true,
- .n_ext_ts = 2,
+ .n_ext_ts = 4,
+ .n_per_out = 1,
};
static void
@@ -526,8 +731,7 @@ __ptp_ocp_clear_drift_locked(struct ptp_ocp *bp)
iowrite32(0, &bp->reg->drift_ns);
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_ADJUST_DRIFT;
+ ctrl = OCP_CTRL_ADJUST_DRIFT | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* restore clock selection */
@@ -559,6 +763,28 @@ ptp_ocp_watchdog(struct timer_list *t)
mod_timer(&bp->watchdog, jiffies + HZ);
}
+static void
+ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
+{
+ ktime_t start, end;
+ ktime_t delay;
+ u32 ctrl;
+
+ ctrl = ioread32(&bp->reg->ctrl); /* value is overwritten below; read result unused */
+ ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
+
+ iowrite32(ctrl, &bp->reg->ctrl);
+
+ start = ktime_get_ns();
+
+ ctrl = ioread32(&bp->reg->ctrl);
+
+ end = ktime_get_ns();
+
+ delay = end - start;
+ bp->ts_window_adjust = (delay >> 5) * 3;
+}
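
ts_window_adjust is (delay >> 5) * 3, i.e. roughly 3/32 (about 9.4%) of one measured register round trip, which __ptp_ocp_gettime_locked() then subtracts from the post-read system timestamp. Standalone arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long delay = 640;		/* example round trip, ns */
	unsigned long adjust = (delay >> 5) * 3;	/* 640/32*3 = 60 ns */

	printf("round trip %lu ns -> post_ts pulled back %lu ns\n",
	       delay, adjust);
	return 0;
}
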
+
static int
ptp_ocp_init_clock(struct ptp_ocp *bp)
{
@@ -566,9 +792,7 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
bool sync;
u32 ctrl;
- /* make sure clock is enabled */
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_ENABLE;
+ ctrl = OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* NO DRIFT Correction */
@@ -587,23 +811,58 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
return -ENODEV;
}
+ ptp_ocp_estimate_pci_timing(bp);
+
sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
if (!sync) {
- ktime_get_real_ts64(&ts);
+ ktime_get_clocktai_ts64(&ts);
ptp_ocp_settime(&bp->ptp_info, &ts);
}
- if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
- dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
- ts.tv_sec, ts.tv_nsec,
- sync ? "in-sync" : "UNSYNCED");
- timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
- mod_timer(&bp->watchdog, jiffies + HZ);
+ /* If there is a clock supervisor, then enable the watchdog */
+ if (bp->pps_to_clk) {
+ timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
+ mod_timer(&bp->watchdog, jiffies + HZ);
+ }
return 0;
}
static void
+ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ bp->utc_tai_offset = val;
+
+ if (bp->irig_out)
+ iowrite32(val, &bp->irig_out->adj_sec);
+ if (bp->dcf_out)
+ iowrite32(val, &bp->dcf_out->adj_sec);
+ if (bp->nmea_out)
+ iowrite32(val, &bp->nmea_out->adj_sec);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+ptp_ocp_tod_init(struct ptp_ocp *bp)
+{
+ u32 ctrl, reg;
+
+ ctrl = ioread32(&bp->tod->ctrl);
+ ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE;
+ ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B);
+ iowrite32(ctrl, &bp->tod->ctrl);
+
+ reg = ioread32(&bp->tod->utc_status);
+ if (reg & TOD_STATUS_UTC_VALID)
+ ptp_ocp_utc_distribute(bp, reg & TOD_STATUS_UTC_MASK);
+}
+
+static void
ptp_ocp_tod_info(struct ptp_ocp *bp)
{
static const char * const proto_name[] = {
@@ -621,11 +880,6 @@ ptp_ocp_tod_info(struct ptp_ocp *bp)
version >> 24, (version >> 16) & 0xff, version & 0xffff);
ctrl = ioread32(&bp->tod->ctrl);
- ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE;
- ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B);
- iowrite32(ctrl, &bp->tod->ctrl);
-
- ctrl = ioread32(&bp->tod->ctrl);
idx = ctrl & TOD_CTRL_PROTOCOL ? 4 : 0;
idx += (ctrl >> 16) & 3;
dev_info(&bp->pdev->dev, "control: %x\n", ctrl);
@@ -639,7 +893,7 @@ ptp_ocp_tod_info(struct ptp_ocp *bp)
reg = ioread32(&bp->tod->status);
dev_info(&bp->pdev->dev, "status: %x\n", reg);
- reg = ioread32(&bp->tod->correction_sec);
+ reg = ioread32(&bp->tod->adj_sec);
dev_info(&bp->pdev->dev, "correction: %d\n", reg);
reg = ioread32(&bp->tod->utc_status);
@@ -695,6 +949,9 @@ ptp_ocp_get_serial_number(struct ptp_ocp *bp)
struct device *dev;
int err;
+ if (!bp->i2c_ctrl)
+ return;
+
dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild);
if (!dev) {
dev_err(&bp->pdev->dev, "Can't find I2C adapter\n");
@@ -720,21 +977,6 @@ out:
put_device(dev);
}
-static void
-ptp_ocp_info(struct ptp_ocp *bp)
-{
- u32 version, select;
-
- version = ioread32(&bp->reg->version);
- select = ioread32(&bp->reg->select);
- dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
- version >> 24, (version >> 16) & 0xff, version & 0xffff,
- ptp_ocp_clock_name_from_val(select >> 16),
- ptp_clock_index(bp->ptp));
-
- ptp_ocp_tod_info(bp);
-}
-
static struct device *
ptp_ocp_find_flash(struct ptp_ocp *bp)
{
@@ -910,18 +1152,6 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
unsigned long start;
int id;
- /* XXX hack to work around old FPGA */
- if (bp->n_irqs < 10) {
- dev_err(&bp->pdev->dev, "FPGA does not have SPI devices\n");
- return 0;
- }
-
- if (r->irq_vec > bp->n_irqs) {
- dev_err(&bp->pdev->dev, "spi device irq %d out of range\n",
- r->irq_vec);
- return 0;
- }
-
start = pci_resource_start(pdev, 0) + r->offset;
ptp_ocp_set_mem_resource(&res[0], start, r->size);
ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
@@ -944,41 +1174,41 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
static struct platform_device *
ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id)
{
+ struct ptp_ocp_i2c_info *info;
struct resource res[2];
unsigned long start;
+ info = r->extra;
start = pci_resource_start(pdev, 0) + r->offset;
ptp_ocp_set_mem_resource(&res[0], start, r->size);
ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
- return platform_device_register_resndata(&pdev->dev, "xiic-i2c",
- id, res, 2, NULL, 0);
+ return platform_device_register_resndata(&pdev->dev, info->name,
+ id, res, 2,
+ info->data, info->data_size);
}
static int
ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r)
{
struct pci_dev *pdev = bp->pdev;
+ struct ptp_ocp_i2c_info *info;
struct platform_device *p;
struct clk_hw *clk;
char buf[32];
int id;
- if (r->irq_vec > bp->n_irqs) {
- dev_err(&bp->pdev->dev, "i2c device irq %d out of range\n",
- r->irq_vec);
- return 0;
- }
-
+ info = r->extra;
id = pci_dev_id(bp->pdev);
sprintf(buf, "AXI.%d", id);
- clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0, 50000000);
+ clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0,
+ info->fixed_rate);
if (IS_ERR(clk))
return PTR_ERR(clk);
bp->i2c_clk = clk;
- sprintf(buf, "xiic-i2c.%d", id);
+ sprintf(buf, "%s.%d", info->name, id);
devm_clk_hw_register_clkdev(&pdev->dev, clk, NULL, buf);
p = ptp_ocp_i2c_bus(bp->pdev, r, id);
if (IS_ERR(p))
@@ -997,26 +1227,51 @@ ptp_ocp_ts_irq(int irq, void *priv)
struct ptp_clock_event ev;
u32 sec, nsec;
+ if (ext == ext->bp->pps) {
+ if (ext->bp->pps_req_map & OCP_REQ_PPS) {
+ ev.type = PTP_CLOCK_PPS;
+ ptp_clock_event(ext->bp->ptp, &ev);
+ }
+
+ if ((ext->bp->pps_req_map & ~OCP_REQ_PPS) == 0)
+ goto out;
+ }
+
/* XXX should fix API - this converts s/ns -> ts -> s/ns */
sec = ioread32(&reg->time_sec);
nsec = ioread32(&reg->time_ns);
ev.type = PTP_CLOCK_EXTTS;
ev.index = ext->info->index;
- ev.timestamp = sec * 1000000000ULL + nsec;
+ ev.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(ext->bp->ptp, &ev);
+out:
iowrite32(1, &reg->intr); /* write 1 to ack */
return IRQ_HANDLED;
}
static int
-ptp_ocp_ts_enable(void *priv, bool enable)
+ptp_ocp_ts_enable(void *priv, u32 req, bool enable)
{
struct ptp_ocp_ext_src *ext = priv;
struct ts_reg __iomem *reg = ext->mem;
+ struct ptp_ocp *bp = ext->bp;
+
+ if (ext == bp->pps) {
+ u32 old_map = bp->pps_req_map;
+
+ if (enable)
+ bp->pps_req_map |= req;
+ else
+ bp->pps_req_map &= ~req;
+
+ /* if no state change, just return */
+ if ((!!old_map ^ !!bp->pps_req_map) == 0)
+ return 0;
+ }
if (enable) {
iowrite32(1, &reg->enable);
@@ -1033,7 +1288,7 @@ ptp_ocp_ts_enable(void *priv, bool enable)
static void
ptp_ocp_unregister_ext(struct ptp_ocp_ext_src *ext)
{
- ext->info->enable(ext, false);
+ ext->info->enable(ext, ~0, false);
pci_free_irq(ext->bp->pdev, ext->irq_vec, ext);
kfree(ext);
}
@@ -1059,7 +1314,7 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r)
ext->irq_vec = r->irq_vec;
err = pci_request_irq(pdev, r->irq_vec, ext->info->irq_fcn, NULL,
- ext, "ocp%d.%s", bp->id, ext->info->name);
+ ext, "ocp%d.%s", bp->id, r->name);
if (err) {
dev_err(&pdev->dev, "Could not get irq %d\n", r->irq_vec);
goto out;
@@ -1101,12 +1356,6 @@ ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r)
{
int port;
- if (r->irq_vec > bp->n_irqs) {
- dev_err(&bp->pdev->dev, "serial device irq %d out of range\n",
- r->irq_vec);
- return 0;
- }
-
port = ptp_ocp_serial_line(bp, r);
if (port < 0)
return port;
@@ -1130,15 +1379,40 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r)
return 0;
}
+static void
+ptp_ocp_nmea_out_init(struct ptp_ocp *bp)
+{
+ if (!bp->nmea_out)
+ return;
+
+ iowrite32(0, &bp->nmea_out->ctrl); /* disable */
+ iowrite32(7, &bp->nmea_out->uart_baud); /* 115200 */
+ iowrite32(1, &bp->nmea_out->ctrl); /* enable */
+}
+
/* FB specific board initializers; last "resource" registered. */
static int
ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
{
bp->flash_start = 1024 * 4096;
+ ptp_ocp_tod_init(bp);
+ ptp_ocp_nmea_out_init(bp);
+
return ptp_ocp_init_clock(bp);
}
+static bool
+ptp_ocp_allow_irq(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ bool allow = !r->irq_vec || r->irq_vec < bp->n_irqs;
+
+ if (!allow)
+ dev_err(&bp->pdev->dev, "irq %d out of range, skipping %s\n",
+ r->irq_vec, r->name);
+ return allow;
+}
+
static int
ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
{
@@ -1147,13 +1421,373 @@ ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
table = (struct ocp_resource *)driver_data;
for (r = table; r->setup; r++) {
+ if (!ptp_ocp_allow_irq(bp, r))
+ continue;
err = r->setup(bp, r);
- if (err)
+ if (err) {
+ dev_err(&bp->pdev->dev,
+ "Could not register %s: err %d\n",
+ r->name, err);
break;
+ }
}
return err;
}
+static void
+ptp_ocp_enable_fpga(u32 __iomem *reg, u32 bit, bool enable)
+{
+ u32 ctrl;
+ bool on;
+
+ ctrl = ioread32(reg);
+ on = ctrl & bit;
+ if (on ^ enable) {
+ ctrl &= ~bit;
+ ctrl |= enable ? bit : 0;
+ iowrite32(ctrl, reg);
+ }
+}
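
ptp_ocp_enable_fpga() is a read-modify-write that skips the register write when the enable bit already has the requested state. A self-contained model of the same logic against a fake register:

#include <stdbool.h>
#include <stdio.h>

static unsigned int fake_reg = 0x10;	/* enable bit 4 already set */
static int writes;

static void enable_bit(unsigned int bit, bool enable)
{
	unsigned int ctrl = fake_reg;
	bool on = ctrl & bit;

	if (on ^ enable) {		/* write only on a real change */
		ctrl &= ~bit;
		ctrl |= enable ? bit : 0;
		fake_reg = ctrl;
		writes++;
	}
}

int main(void)
{
	enable_bit(0x10, true);		/* already on: skipped */
	enable_bit(0x01, true);		/* actual change: written */
	printf("reg=%#x writes=%d\n", fake_reg, writes);	/* 0x11, 1 */
	return 0;
}
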
+
+static void
+ptp_ocp_irig_out(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->irig_out->ctrl,
+ IRIG_M_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_irig_in(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->irig_in->ctrl,
+ IRIG_S_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_dcf_out(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->dcf_out->ctrl,
+ DCF_M_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_dcf_in(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->dcf_in->ctrl,
+ DCF_S_CTRL_ENABLE, enable);
+}
+
+static void
+__handle_signal_outputs(struct ptp_ocp *bp, u32 val)
+{
+ ptp_ocp_irig_out(bp, val & 0x00100010);
+ ptp_ocp_dcf_out(bp, val & 0x00200020);
+}
+
+static void
+__handle_signal_inputs(struct ptp_ocp *bp, u32 val)
+{
+ ptp_ocp_irig_in(bp, val & 0x00100010);
+ ptp_ocp_dcf_in(bp, val & 0x00200020);
+}
+
+/*
+ * ANT0 == gps (in)
+ * ANT1 == sma1 (in)
+ * ANT2 == sma2 (in)
+ * ANT3 == sma3 (out)
+ * ANT4 == sma4 (out)
+ */
+
+enum ptp_ocp_sma_mode {
+ SMA_MODE_IN,
+ SMA_MODE_OUT,
+};
+
+static struct ptp_ocp_sma_connector {
+ enum ptp_ocp_sma_mode mode;
+ bool fixed_mode;
+ u16 default_out_idx;
+} ptp_ocp_sma_map[4] = {
+ {
+ .mode = SMA_MODE_IN,
+ .fixed_mode = true,
+ },
+ {
+ .mode = SMA_MODE_IN,
+ .fixed_mode = true,
+ },
+ {
+ .mode = SMA_MODE_OUT,
+ .fixed_mode = true,
+ .default_out_idx = 0, /* 10Mhz */
+ },
+ {
+ .mode = SMA_MODE_OUT,
+ .fixed_mode = true,
+ .default_out_idx = 1, /* PHC */
+ },
+};
+
+static ssize_t
+ptp_ocp_show_output(u32 val, char *buf, int default_idx)
+{
+ const char *name;
+ ssize_t count;
+
+ count = sysfs_emit(buf, "OUT: ");
+ name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val);
+ if (!name)
+ name = ptp_ocp_sma_out[default_idx].name;
+ count += sysfs_emit_at(buf, count, "%s\n", name);
+ return count;
+}
+
+static ssize_t
+ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in)
+{
+ const char *name;
+ ssize_t count;
+ int i;
+
+ count = sysfs_emit(buf, "IN: ");
+ for (i = 0; i < ARRAY_SIZE(ptp_ocp_sma_in); i++) {
+ if (val & ptp_ocp_sma_in[i].value) {
+ name = ptp_ocp_sma_in[i].name;
+ count += sysfs_emit_at(buf, count, "%s ", name);
+ }
+ }
+ if (!val && zero_in)
+ count += sysfs_emit_at(buf, count, "%s ", zero_in);
+ if (count)
+ count--;
+ count += sysfs_emit_at(buf, count, "\n");
+ return count;
+}
+
+static int
+sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode)
+{
+ struct ocp_selector *tbl[] = { ptp_ocp_sma_in, ptp_ocp_sma_out };
+ int idx, count, dir;
+ char **argv;
+ int ret;
+
+ argv = argv_split(GFP_KERNEL, buf, &count);
+ if (!argv)
+ return -ENOMEM;
+
+ ret = -EINVAL;
+ if (!count)
+ goto out;
+
+ idx = 0;
+ dir = *mode == SMA_MODE_IN ? 0 : 1;
+ if (!strcasecmp("IN:", argv[idx])) {
+ dir = 0;
+ idx++;
+ }
+ if (!strcasecmp("OUT:", argv[0])) {
+ dir = 1;
+ idx++;
+ }
+ *mode = dir == 0 ? SMA_MODE_IN : SMA_MODE_OUT;
+
+ ret = 0;
+ for (; idx < count; idx++)
+ ret |= ptp_ocp_select_val_from_name(tbl[dir], argv[idx]);
+ if (ret < 0)
+ ret = -EINVAL;
+
+out:
+ argv_free(argv);
+ return ret;
+}
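
sma_parse_inputs() splits the sysfs string, lets an optional "IN:"/"OUT:" prefix override the connector's default direction, and ORs the remaining tokens' selector values together. A simplified userspace sketch handling just the "IN:" prefix, with table values abbreviated from ptp_ocp_sma_in:

#include <stdio.h>
#include <string.h>
#include <strings.h>

struct sel { const char *name; int value; };

static struct sel in_tbl[] = {
	{ "10Mhz", 0x00 }, { "PPS1", 0x01 }, { "PPS2", 0x02 },
	{ "TS1", 0x04 }, { "TS2", 0x08 }, { NULL, 0 }
};

static int val_from_name(struct sel *tbl, const char *name)
{
	for (int i = 0; tbl[i].name; i++)
		if (!strcasecmp(name, tbl[i].name))
			return tbl[i].value;
	return -1;
}

int main(void)
{
	char buf[] = "IN: TS1 TS2";	/* as written to the sysfs file */
	int val = 0;
	char *tok;

	tok = strtok(buf, " ");
	if (tok && !strcasecmp(tok, "IN:"))	/* optional direction prefix */
		tok = strtok(NULL, " ");
	for (; tok; tok = strtok(NULL, " "))
		val |= val_from_name(in_tbl, tok);
	printf("0x%02x\n", val);	/* 0x0c: TS1 | TS2 */
	return 0;
}
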
+
+static ssize_t
+ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, u32 val, char *buf,
+ const char *zero_in)
+{
+ struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+
+ if (sma->mode == SMA_MODE_IN)
+ return ptp_ocp_show_inputs(val, buf, zero_in);
+
+ return ptp_ocp_show_output(val, buf, sma->default_out_idx);
+}
+
+static ssize_t
+sma1_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = ioread32(&bp->sma->gpio1) & 0x3f;
+ return ptp_ocp_sma_show(bp, 1, val, buf, ptp_ocp_sma_in[0].name);
+}
+
+static ssize_t
+sma2_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = (ioread32(&bp->sma->gpio1) >> 16) & 0x3f;
+ return ptp_ocp_sma_show(bp, 2, val, buf, NULL);
+}
+
+static ssize_t
+sma3_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = ioread32(&bp->sma->gpio2) & 0x3f;
+ return ptp_ocp_sma_show(bp, 3, val, buf, NULL);
+}
+
+static ssize_t
+sma4_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = (ioread32(&bp->sma->gpio2) >> 16) & 0x3f;
+ return ptp_ocp_sma_show(bp, 4, val, buf, NULL);
+}
+
+static void
+ptp_ocp_sma_store_output(struct ptp_ocp *bp, u32 val, u32 shift)
+{
+ unsigned long flags;
+ u32 gpio, mask;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ gpio = ioread32(&bp->sma->gpio2);
+ gpio = (gpio & mask) | (val << shift);
+
+ __handle_signal_outputs(bp, gpio);
+
+ iowrite32(gpio, &bp->sma->gpio2);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, u32 val, u32 shift)
+{
+ unsigned long flags;
+ u32 gpio, mask;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ gpio = ioread32(&bp->sma->gpio1);
+ gpio = (gpio & mask) | (val << shift);
+
+ __handle_signal_inputs(bp, gpio);
+
+ iowrite32(gpio, &bp->sma->gpio1);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
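
Both store helpers share the same masking trick: two connectors live in one 32-bit GPIO word, and mask = 0xffff << (16 - shift) preserves the other connector's half while the new selector value lands at shift. Standalone check:

#include <stdio.h>

static unsigned int update_half(unsigned int gpio, unsigned int val,
				unsigned int shift)
{
	unsigned int mask = 0xffffu << (16 - shift);

	return (gpio & mask) | (val << shift);
}

int main(void)
{
	unsigned int gpio = 0x00080001;	/* sma2 = TS2, sma1 = PPS1 */

	/* rewrite the low half (shift 0): the high half survives */
	printf("%08x\n", update_half(gpio, 0x04, 0));	/* 00080004 */
	/* rewrite the high half (shift 16): the low half survives */
	printf("%08x\n", update_half(gpio, 0x10, 16));	/* 00100001 */
	return 0;
}
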
+
+static ssize_t
+ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift)
+{
+ struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+ enum ptp_ocp_sma_mode mode;
+ int val;
+
+ mode = sma->mode;
+ val = sma_parse_inputs(buf, &mode);
+ if (val < 0)
+ return val;
+
+ if (mode != sma->mode && sma->fixed_mode)
+ return -EOPNOTSUPP;
+
+ if (mode != sma->mode) {
+ pr_err("Mode changes not supported yet.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (sma->mode == SMA_MODE_IN)
+ ptp_ocp_sma_store_inputs(bp, val, shift);
+ else
+ ptp_ocp_sma_store_output(bp, val, shift);
+
+ return 0;
+}
+
+static ssize_t
+sma1_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 1, 0);
+ return err ? err : count;
+}
+
+static ssize_t
+sma2_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 2, 16);
+ return err ? err : count;
+}
+
+static ssize_t
+sma3_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 3, 0);
+ return err ? err : count;
+}
+
+static ssize_t
+sma4_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 4, 16);
+ return err ? err : count;
+}
+static DEVICE_ATTR_RW(sma1);
+static DEVICE_ATTR_RW(sma2);
+static DEVICE_ATTR_RW(sma3);
+static DEVICE_ATTR_RW(sma4);
+
+static ssize_t
+available_sma_inputs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return ptp_ocp_select_table_show(ptp_ocp_sma_in, buf);
+}
+static DEVICE_ATTR_RO(available_sma_inputs);
+
+static ssize_t
+available_sma_outputs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return ptp_ocp_select_table_show(ptp_ocp_sma_out, buf);
+}
+static DEVICE_ATTR_RO(available_sma_outputs);
+
static ssize_t
serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -1182,6 +1816,102 @@ gnss_sync_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(gnss_sync);
static ssize_t
+utc_tai_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", bp->utc_tai_offset);
+}
+
+static ssize_t
+utc_tai_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = kstrtou32(buf, 0, &val);
+ if (err)
+ return err;
+
+ ptp_ocp_utc_distribute(bp, val);
+
+ return count;
+}
+static DEVICE_ATTR_RW(utc_tai_offset);
+
+static ssize_t
+ts_window_adjust_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", bp->ts_window_adjust);
+}
+
+static ssize_t
+ts_window_adjust_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = kstrtou32(buf, 0, &val);
+ if (err)
+ return err;
+
+ bp->ts_window_adjust = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(ts_window_adjust);
+
+static ssize_t
+irig_b_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = ioread32(&bp->irig_out->ctrl);
+ val = (val >> 16) & 0x07;
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t
+irig_b_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+ u32 reg;
+ u8 val;
+
+ err = kstrtou8(buf, 0, &val);
+ if (err)
+ return err;
+ if (val > 7)
+ return -EINVAL;
+
+ reg = ((val & 0x7) << 16);
+
+ spin_lock_irqsave(&bp->lock, flags);
+ iowrite32(0, &bp->irig_out->ctrl); /* disable */
+ iowrite32(reg, &bp->irig_out->ctrl); /* change mode */
+ iowrite32(reg | IRIG_M_CTRL_ENABLE, &bp->irig_out->ctrl);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR_RW(irig_b_mode);
+
+static ssize_t
clock_source_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
@@ -1189,7 +1919,7 @@ clock_source_show(struct device *dev, struct device_attribute *attr, char *buf)
u32 select;
select = ioread32(&bp->reg->select);
- p = ptp_ocp_clock_name_from_val(select >> 16);
+ p = ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16);
return sysfs_emit(buf, "%s\n", p);
}
@@ -1202,7 +1932,7 @@ clock_source_store(struct device *dev, struct device_attribute *attr,
unsigned long flags;
int val;
- val = ptp_ocp_clock_val_from_name(buf);
+ val = ptp_ocp_select_val_from_name(ptp_ocp_clock, buf);
if (val < 0)
return val;
@@ -1218,19 +1948,7 @@ static ssize_t
available_clock_sources_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const char *clk;
- ssize_t count;
- int i;
-
- count = 0;
- for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
- clk = ptp_ocp_clock[i].name;
- count += sysfs_emit_at(buf, count, "%s ", clk);
- }
- if (count)
- count--;
- count += sysfs_emit_at(buf, count, "\n");
- return count;
+ return ptp_ocp_select_table_show(ptp_ocp_clock, buf);
}
static DEVICE_ATTR_RO(available_clock_sources);
@@ -1239,10 +1957,258 @@ static struct attribute *timecard_attrs[] = {
&dev_attr_gnss_sync.attr,
&dev_attr_clock_source.attr,
&dev_attr_available_clock_sources.attr,
+ &dev_attr_sma1.attr,
+ &dev_attr_sma2.attr,
+ &dev_attr_sma3.attr,
+ &dev_attr_sma4.attr,
+ &dev_attr_available_sma_inputs.attr,
+ &dev_attr_available_sma_outputs.attr,
+ &dev_attr_irig_b_mode.attr,
+ &dev_attr_utc_tai_offset.attr,
+ &dev_attr_ts_window_adjust.attr,
NULL,
};
ATTRIBUTE_GROUPS(timecard);
+static const char *
+gpio_map(u32 gpio, u32 bit, const char *pri, const char *sec, const char *def)
+{
+ const char *ans;
+
+ if (gpio & (1 << bit))
+ ans = pri;
+ else if (gpio & (1 << (bit + 16)))
+ ans = sec;
+ else
+ ans = def;
+ return ans;
+}
+
+static void
+gpio_multi_map(char *buf, u32 gpio, u32 bit,
+ const char *pri, const char *sec, const char *def)
+{
+ char *ans = buf;
+
+ strcpy(ans, def);
+ if (gpio & (1 << bit))
+ ans += sprintf(ans, "%s ", pri);
+ if (gpio & (1 << (bit + 16)))
+ ans += sprintf(ans, "%s ", sec);
+}
+
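
gpio_map() and gpio_multi_map() above decode the SMA routing registers: bit N in the low 16 bits selects the primary label for a function, the same bit shifted into the high 16 bits selects the secondary, and neither set falls back to the default. A self-contained mirror of the single-choice decode:

#include <stdint.h>
#include <stdio.h>

/* mirror of gpio_map(): low half = primary, high half = secondary */
static const char *map(uint32_t gpio, unsigned int bit,
		       const char *pri, const char *sec, const char *def)
{
	if (gpio & (1u << bit))
		return pri;
	if (gpio & (1u << (bit + 16)))
		return sec;
	return def;
}

int main(void)
{
	printf("%s\n", map(1u << 2, 2, "sma1", "sma2", "----"));	/* sma1 */
	printf("%s\n", map(1u << (3 + 16), 3, "sma1", "sma2", "----"));	/* sma2 */
	printf("%s\n", map(0, 4, "sma1", "sma2", "----"));		/* ---- */
	return 0;
}
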
+static int
+ptp_ocp_summary_show(struct seq_file *s, void *data)
+{
+ struct device *dev = s->private;
+ struct ptp_system_timestamp sts;
+ u32 sma_in, sma_out, ctrl, val;
+ struct ts_reg __iomem *ts_reg;
+ struct timespec64 ts;
+ struct ptp_ocp *bp;
+ const char *src;
+ bool on, map;
+ char *buf;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ bp = dev_get_drvdata(dev);
+ sma_in = ioread32(&bp->sma->gpio1);
+ sma_out = ioread32(&bp->sma->gpio2);
+
+ seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
+
+ sma1_show(dev, NULL, buf);
+ seq_printf(s, " sma1: %s", buf);
+
+ sma2_show(dev, NULL, buf);
+ seq_printf(s, " sma2: %s", buf);
+
+ sma3_show(dev, NULL, buf);
+ seq_printf(s, " sma3: %s", buf);
+
+ sma4_show(dev, NULL, buf);
+ seq_printf(s, " sma4: %s", buf);
+
+ if (bp->ts0) {
+ ts_reg = bp->ts0->mem;
+ on = ioread32(&ts_reg->enable);
+ src = "GNSS";
+ seq_printf(s, "%7s: %s, src: %s\n", "TS0",
+ on ? " ON" : "OFF", src);
+ }
+
+ if (bp->ts1) {
+ ts_reg = bp->ts1->mem;
+ on = ioread32(&ts_reg->enable);
+ src = gpio_map(sma_in, 2, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, src: %s\n", "TS1",
+ on ? " ON" : "OFF", src);
+ }
+
+ if (bp->ts2) {
+ ts_reg = bp->ts2->mem;
+ on = ioread32(&ts_reg->enable);
+ src = gpio_map(sma_in, 3, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, src: %s\n", "TS2",
+ on ? " ON" : "OFF", src);
+ }
+
+ if (bp->pps) {
+ ts_reg = bp->pps->mem;
+ src = "PHC";
+ on = ioread32(&ts_reg->enable);
+ map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP);
+ seq_printf(s, "%7s: %s, src: %s\n", "TS3",
+ on && map ? " ON" : "OFF", src);
+
+ map = !!(bp->pps_req_map & OCP_REQ_PPS);
+ seq_printf(s, "%7s: %s, src: %s\n", "PPS",
+ on && map ? " ON" : "OFF", src);
+ }
+
+ if (bp->irig_out) {
+ ctrl = ioread32(&bp->irig_out->ctrl);
+ on = ctrl & IRIG_M_CTRL_ENABLE;
+ val = ioread32(&bp->irig_out->status);
+ gpio_multi_map(buf, sma_out, 4, "sma3", "sma4", "----");
+ seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG",
+ on ? " ON" : "OFF", val, (ctrl >> 16), buf);
+ }
+
+ if (bp->irig_in) {
+ on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE;
+ val = ioread32(&bp->irig_in->status);
+ src = gpio_map(sma_in, 4, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in",
+ on ? " ON" : "OFF", val, src);
+ }
+
+ if (bp->dcf_out) {
+ on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE;
+ val = ioread32(&bp->dcf_out->status);
+ gpio_multi_map(buf, sma_out, 5, "sma3", "sma4", "----");
+ seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF",
+ on ? " ON" : "OFF", val, buf);
+ }
+
+ if (bp->dcf_in) {
+ on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE;
+ val = ioread32(&bp->dcf_in->status);
+ src = gpio_map(sma_in, 5, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in",
+ on ? " ON" : "OFF", val, src);
+ }
+
+ if (bp->nmea_out) {
+ on = ioread32(&bp->nmea_out->ctrl) & 1;
+ val = ioread32(&bp->nmea_out->status);
+ seq_printf(s, "%7s: %s, error: %d\n", "NMEA",
+ on ? " ON" : "OFF", val);
+ }
+
+ /* compute src for PPS1, used below. */
+ if (bp->pps_select) {
+ val = ioread32(&bp->pps_select->gpio1);
+ if (val & 0x01)
+ src = gpio_map(sma_in, 0, "sma1", "sma2", "----");
+ else if (val & 0x02)
+ src = "MAC";
+ else if (val & 0x04)
+ src = "GNSS";
+ else
+ src = "----";
+ } else {
+ src = "?";
+ }
+
+ /* assumes automatic switchover/selection */
+ val = ioread32(&bp->reg->select);
+ switch (val >> 16) {
+ case 0:
+ sprintf(buf, "----");
+ break;
+ case 2:
+ sprintf(buf, "IRIG");
+ break;
+ case 3:
+ sprintf(buf, "%s via PPS1", src);
+ break;
+ case 6:
+ sprintf(buf, "DCF");
+ break;
+ default:
+ strcpy(buf, "unknown");
+ break;
+ }
+ val = ioread32(&bp->reg->status);
+ seq_printf(s, "%7s: %s, state: %s\n", "PHC src", buf,
+ val & OCP_STATUS_IN_SYNC ? "sync" : "unsynced");
+
+ /* reuses PPS1 src from earlier */
+ seq_printf(s, "MAC PPS1 src: %s\n", src);
+
+ src = gpio_map(sma_in, 1, "sma1", "sma2", "GNSS2");
+ seq_printf(s, "MAC PPS2 src: %s\n", src);
+
+ if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) {
+ struct timespec64 sys_ts;
+ s64 pre_ns, post_ns, ns;
+
+ pre_ns = timespec64_to_ns(&sts.pre_ts);
+ post_ns = timespec64_to_ns(&sts.post_ts);
+ ns = (pre_ns + post_ns) / 2;
+ ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC;
+ sys_ts = ns_to_timespec64(ns);
+
+ seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC",
+ ts.tv_sec, ts.tv_nsec, &ts);
+ seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS",
+ sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts,
+ bp->utc_tai_offset);
+ seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "",
+ timespec64_to_ns(&ts) - ns,
+ post_ns - pre_ns);
+ }
+
+ free_page((unsigned long)buf);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary);
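
The PHC:SYS comparison at the end of ptp_ocp_summary_show() estimates system time as the midpoint of the pre/post system timestamps captured around the PHC read, converts it from UTC to TAI by adding utc_tai_offset seconds, and reports post - pre as the sampling window. The same arithmetic, standalone with made-up sample values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
	/* made-up sample: system clock read before and after the PHC read */
	int64_t pre_ns  = 1700000000LL * NSEC_PER_SEC + 100;
	int64_t post_ns = 1700000000LL * NSEC_PER_SEC + 700;
	int64_t phc_ns  = (1700000000LL + 37) * NSEC_PER_SEC + 400;
	int64_t utc_tai_offset = 37;	/* TAI-UTC has been 37 s since 2017 */
	int64_t sys_ns;

	sys_ns = (pre_ns + post_ns) / 2;		/* midpoint estimate */
	sys_ns += utc_tai_offset * NSEC_PER_SEC;	/* shift UTC to TAI */

	printf("PHC:SYS offset: %lld window: %lld\n",
	       (long long)(phc_ns - sys_ns), (long long)(post_ns - pre_ns));
	return 0;
}
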
+
+static struct dentry *ptp_ocp_debugfs_root;
+
+static void
+ptp_ocp_debugfs_add_device(struct ptp_ocp *bp)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir(dev_name(&bp->dev), ptp_ocp_debugfs_root);
+ bp->debug_root = d;
+ debugfs_create_file("summary", 0444, bp->debug_root,
+ &bp->dev, &ptp_ocp_summary_fops);
+}
+
+static void
+ptp_ocp_debugfs_remove_device(struct ptp_ocp *bp)
+{
+ debugfs_remove_recursive(bp->debug_root);
+}
+
+static void
+ptp_ocp_debugfs_init(void)
+{
+ ptp_ocp_debugfs_root = debugfs_create_dir("timecard", NULL);
+}
+
+static void
+ptp_ocp_debugfs_fini(void)
+{
+ debugfs_remove_recursive(ptp_ocp_debugfs_root);
+}
+
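
DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary) expands to the usual single-open seq_file wrapper and supplies the ptp_ocp_summary_fops referenced in ptp_ocp_debugfs_add_device(), so each board gets a readable "summary" file under the new "timecard" debugfs directory. A throwaway userspace reader; the debugfs mount point and the "ocp0" directory name are assumptions:

#include <stdio.h>

int main(void)
{
	/* path assumes debugfs at /sys/kernel/debug and a card named ocp0 */
	FILE *f = fopen("/sys/kernel/debug/timecard/ocp0/summary", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
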
static void
ptp_ocp_dev_release(struct device *dev)
{
@@ -1270,7 +2236,9 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
bp->ptp_info = ptp_ocp_clock_info;
spin_lock_init(&bp->lock);
bp->gnss_port = -1;
+ bp->gnss2_port = -1;
bp->mac_port = -1;
+ bp->nmea_port = -1;
bp->pdev = pdev;
device_initialize(&bp->dev);
@@ -1332,10 +2300,18 @@ ptp_ocp_complete(struct ptp_ocp *bp)
sprintf(buf, "ttyS%d", bp->gnss_port);
ptp_ocp_link_child(bp, buf, "ttyGNSS");
}
+ if (bp->gnss2_port != -1) {
+ sprintf(buf, "ttyS%d", bp->gnss2_port);
+ ptp_ocp_link_child(bp, buf, "ttyGNSS2");
+ }
if (bp->mac_port != -1) {
sprintf(buf, "ttyS%d", bp->mac_port);
ptp_ocp_link_child(bp, buf, "ttyMAC");
}
+ if (bp->nmea_port != -1) {
+ sprintf(buf, "ttyS%d", bp->nmea_port);
+ ptp_ocp_link_child(bp, buf, "ttyNMEA");
+ }
sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
ptp_ocp_link_child(bp, buf, "ptp");
@@ -1346,13 +2322,53 @@ ptp_ocp_complete(struct ptp_ocp *bp)
if (device_add_groups(&bp->dev, timecard_groups))
pr_err("device add groups failed\n");
+ ptp_ocp_debugfs_add_device(bp);
+
return 0;
}
static void
-ptp_ocp_resource_summary(struct ptp_ocp *bp)
+ptp_ocp_phc_info(struct ptp_ocp *bp)
{
+ struct timespec64 ts;
+ u32 version, select;
+ bool sync;
+
+ version = ioread32(&bp->reg->version);
+ select = ioread32(&bp->reg->select);
+ dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
+ version >> 24, (version >> 16) & 0xff, version & 0xffff,
+ ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16),
+ ptp_clock_index(bp->ptp));
+
+ sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
+ if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
+ dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
+ ts.tv_sec, ts.tv_nsec,
+ sync ? "in-sync" : "UNSYNCED");
+}
+
+static void
+ptp_ocp_serial_info(struct device *dev, const char *name, int port, int baud)
+{
+ if (port != -1)
+ dev_info(dev, "%5s: /dev/ttyS%-2d @ %6d\n", name, port, baud);
+}
+
+static void
+ptp_ocp_info(struct ptp_ocp *bp)
+{
+ static int nmea_baud[] = {
+ 1200, 2400, 4800, 9600, 19200, 38400,
+ 57600, 115200, 230400, 460800, 921600,
+ 1000000, 2000000
+ };
struct device *dev = &bp->pdev->dev;
+ u32 reg;
+
+ ptp_ocp_phc_info(bp);
+ if (bp->tod)
+ ptp_ocp_tod_info(bp);
if (bp->image) {
u32 ver = ioread32(&bp->image->version);
@@ -1365,10 +2381,17 @@ ptp_ocp_resource_summary(struct ptp_ocp *bp)
dev_info(dev, "golden image, version %d\n",
ver >> 16);
}
- if (bp->gnss_port != -1)
- dev_info(dev, "GNSS @ /dev/ttyS%d 115200\n", bp->gnss_port);
- if (bp->mac_port != -1)
- dev_info(dev, "MAC @ /dev/ttyS%d 57600\n", bp->mac_port);
+ ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200);
+ ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200);
+ ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600);
+ if (bp->nmea_out && bp->nmea_port != -1) {
+ int baud = -1;
+
+ reg = ioread32(&bp->nmea_out->uart_baud);
+ if (reg < ARRAY_SIZE(nmea_baud))
+ baud = nmea_baud[reg];
+ ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port, baud);
+ }
}
static void
@@ -1386,6 +2409,7 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
static void
ptp_ocp_detach(struct ptp_ocp *bp)
{
+ ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
if (timer_pending(&bp->watchdog))
del_timer_sync(&bp->watchdog);
@@ -1393,12 +2417,18 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_unregister_ext(bp->ts0);
if (bp->ts1)
ptp_ocp_unregister_ext(bp->ts1);
+ if (bp->ts2)
+ ptp_ocp_unregister_ext(bp->ts2);
if (bp->pps)
ptp_ocp_unregister_ext(bp->pps);
if (bp->gnss_port != -1)
serial8250_unregister_port(bp->gnss_port);
+ if (bp->gnss2_port != -1)
+ serial8250_unregister_port(bp->gnss2_port);
if (bp->mac_port != -1)
serial8250_unregister_port(bp->mac_port);
+ if (bp->nmea_port != -1)
+ serial8250_unregister_port(bp->nmea_port);
if (bp->spi_flash)
platform_device_unregister(bp->spi_flash);
if (bp->i2c_ctrl)
@@ -1425,10 +2455,6 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
}
- err = devlink_register(devlink);
- if (err)
- goto out_free;
-
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device\n");
@@ -1445,7 +2471,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* allow this - if not all of the IRQs are returned, skip the
* extra devices and just register the clock.
*/
- err = pci_alloc_irq_vectors(pdev, 1, 10, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ err = pci_alloc_irq_vectors(pdev, 1, 11, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (err < 0) {
dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err);
goto out;
@@ -1470,8 +2496,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
ptp_ocp_info(bp);
- ptp_ocp_resource_summary(bp);
-
+ devlink_register(devlink);
return 0;
out:
@@ -1480,10 +2505,7 @@ out:
out_disable:
pci_disable_device(pdev);
out_unregister:
- devlink_unregister(devlink);
-out_free:
devlink_free(devlink);
-
return err;
}
@@ -1493,11 +2515,11 @@ ptp_ocp_remove(struct pci_dev *pdev)
struct ptp_ocp *bp = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(bp);
+ devlink_unregister(devlink);
ptp_ocp_detach(bp);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
- devlink_unregister(devlink);
devlink_free(devlink);
}
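
The probe/remove reordering above follows the devlink lifecycle convention this series adopts: devlink_register() (now a void call, hence the dropped error check and the removed out_free label) happens only once the device is fully initialized, and devlink_unregister() is the first teardown step. The shape, as a non-compiling sketch:

/* shape only -- not compilable as-is */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... enable device, map BARs, register clock/sysfs/debugfs ... */
	devlink_register(devlink);	/* last step: instance is complete */
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	devlink_unregister(devlink);	/* first step: stop devlink ops */
	/* ... detach, disable device, devlink_free() ... */
}
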
@@ -1554,6 +2576,8 @@ ptp_ocp_init(void)
const char *what;
int err;
+ ptp_ocp_debugfs_init();
+
what = "timecard class";
err = class_register(&timecard_class);
if (err)
@@ -1576,6 +2600,7 @@ out_register:
out_notifier:
class_unregister(&timecard_class);
out:
+ ptp_ocp_debugfs_fini();
pr_err(KBUILD_MODNAME ": failed to register %s: %d\n", what, err);
return err;
}
@@ -1586,6 +2611,7 @@ ptp_ocp_fini(void)
bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
pci_unregister_driver(&ptp_ocp_driver);
class_unregister(&timecard_class);
+ ptp_ocp_debugfs_fini();
}
module_init(ptp_ocp_init);
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 06281a0a0552..de2423c72b02 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -182,7 +182,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
-/**
+/*
* Check return code of a preceding ccw_device call, halt_IO etc...
*
* ch : The channel, the error belongs to.
@@ -223,7 +223,7 @@ void ctcm_purge_skb_queue(struct sk_buff_head *q)
}
}
-/**
+/*
* NOP action for statemachines
*/
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
@@ -234,7 +234,7 @@ static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
* Actions for channel - statemachines.
*/
-/**
+/*
* Normal data has been sent. Free the corresponding
* skb (it's in io_queue), reset dev->tbusy and
* revert to idle state.
@@ -322,7 +322,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
ctcm_clear_busy_do(dev);
}
-/**
+/*
* Initial data is sent.
* Notify device statemachine that we are up and
* running.
@@ -344,7 +344,7 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}
-/**
+/*
* Got normal data, check for sanity, queue it up, allocate new buffer
* trigger bottom half, and initiate next read.
*
@@ -421,7 +421,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
ctcm_ccw_check_rc(ch, rc, "normal RX");
}
-/**
+/*
* Initialize connection by sending a __u16 of value 0.
*
* fi An instance of a channel statemachine.
@@ -497,7 +497,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Got initial data, check it. If OK,
* notify device statemachine that we are up and
* running.
@@ -538,7 +538,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Set channel into extended mode.
*
* fi An instance of a channel statemachine.
@@ -578,7 +578,7 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
ch->retry = 0;
}
-/**
+/*
* Setup channel.
*
* fi An instance of a channel statemachine.
@@ -641,7 +641,7 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Shutdown a channel.
*
* fi An instance of a channel statemachine.
@@ -682,7 +682,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Cleanup helper for chx_fail and chx_stopped:
* clean up the channel's queue and notify the interface statemachine.
*
@@ -728,7 +728,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
}
}
-/**
+/*
* A channel has successfully been halted.
* Clean up its queue and notify the interface statemachine.
*
@@ -741,7 +741,7 @@ static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}
-/**
+/*
* A stop command from the device statemachine arrived and we are in
* non-operational mode. Set state to stopped.
*
@@ -754,7 +754,7 @@ static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CTC_STATE_STOPPED);
}
-/**
+/*
* A machine check for no path, not-operational status, or a gone device
* has happened.
* Cleanup queue and notify interface statemachine.
@@ -768,7 +768,7 @@ static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}
-/**
+/*
* Handle error during setup of channel.
*
* fi An instance of a channel statemachine.
@@ -817,7 +817,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Restart a channel after an error.
*
* fi An instance of a channel statemachine.
@@ -858,7 +858,7 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Handle error during RX initial handshake (exchange of
* 0-length block header)
*
@@ -893,7 +893,7 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Notify device statemachine if we gave up initialization
* of RX channel.
*
@@ -914,7 +914,7 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
-/**
+/*
* Handle RX Unit check remote reset (remote disconnected)
*
* fi An instance of a channel statemachine.
@@ -946,7 +946,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
ccw_device_halt(ch2->cdev, 0);
}
-/**
+/*
* Handle error during TX channel initialization.
*
* fi An instance of a channel statemachine.
@@ -978,7 +978,7 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Handle TX timeout by retrying operation.
*
* fi An instance of a channel statemachine.
@@ -1050,7 +1050,7 @@ done:
return;
}
-/**
+/*
* Handle fatal errors during an I/O command.
*
* fi An instance of a channel statemachine.
@@ -1198,7 +1198,7 @@ int ch_fsm_len = ARRAY_SIZE(ch_fsm);
* Actions for mpc channel statemachine.
*/
-/**
+/*
* Normal data has been sent. Free the corresponding
* skb (it's in io_queue), reset dev->tbusy and
* revert to idle state.
@@ -1361,7 +1361,7 @@ done:
return;
}
-/**
+/*
* Got normal data, check for sanity, queue it up, allocate new buffer
* trigger bottom half, and initiate next read.
*
@@ -1464,7 +1464,7 @@ again:
}
-/**
+/*
* Initialize connection by sending a __u16 of value 0.
*
* fi An instance of a channel statemachine.
@@ -1516,7 +1516,7 @@ done:
return;
}
-/**
+/*
* Got initial data, check it. If OK,
* notify device statemachine that we are up and
* running.
@@ -2043,7 +2043,7 @@ int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
* Actions for interface - statemachine.
*/
-/**
+/*
* Startup channels by sending CTC_EVENT_START to each channel.
*
* fi An instance of an interface statemachine.
@@ -2068,7 +2068,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Shutdown channels by sending CTC_EVENT_STOP to each channel.
*
* fi An instance of an interface statemachine.
@@ -2122,7 +2122,7 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg)
DEV_EVENT_START, dev);
}
-/**
+/*
* Called from channel statemachine
* when a channel is up and running.
*
@@ -2183,7 +2183,7 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Called from device statemachine
* when a channel has been shutdown.
*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index fd705429708e..5ea7eeb07002 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -55,7 +55,7 @@
/* Some common global variables */
-/**
+/*
* The root device for ctcm group devices
*/
static struct device *ctcm_root_dev;
@@ -65,7 +65,7 @@ static struct device *ctcm_root_dev;
*/
struct channel *channels;
-/**
+/*
* Unpack a just received skb and hand it over to
* upper layers.
*
@@ -180,7 +180,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
}
}
-/**
+/*
* Release a specific channel in the channel list.
*
* ch Pointer to channel struct to be released.
@@ -192,7 +192,7 @@ static void channel_free(struct channel *ch)
fsm_newstate(ch->fsm, CTC_STATE_IDLE);
}
-/**
+/*
* Remove a specific channel from the channel list.
*
* ch Pointer to channel struct to be released.
@@ -240,7 +240,7 @@ static void channel_remove(struct channel *ch)
chid, ok ? "OK" : "failed");
}
-/**
+/*
* Get a specific channel from the channel list.
*
* type Type of channel we are interested in.
@@ -300,7 +300,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
}
-/**
+/*
* Check sense of a unit check.
*
* ch The channel, the sense code belongs to.
@@ -414,7 +414,7 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
* Interface API for upper network layers
*/
-/**
+/*
* Open an interface.
* Called from generic network layer when ifconfig up is run.
*
@@ -432,7 +432,7 @@ int ctcm_open(struct net_device *dev)
return 0;
}
-/**
+/*
* Close an interface.
* Called from generic network layer when ifconfig down is run.
*
@@ -451,7 +451,7 @@ int ctcm_close(struct net_device *dev)
}
-/**
+/*
* Transmit a packet.
* This is a helper function for ctcm_tx().
*
@@ -822,7 +822,7 @@ done:
return rc;
}
-/**
+/*
* Start transmission of a packet.
* Called from generic network device layer.
*
@@ -975,7 +975,7 @@ done:
}
-/**
+/*
* Sets MTU of an interface.
*
* dev Pointer to interface struct.
@@ -1007,7 +1007,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-/**
+/*
* Returns interface statistics of a device.
*
* dev Pointer to interface struct.
@@ -1144,7 +1144,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
return dev;
}
-/**
+/*
* Main IRQ handler.
*
* cdev The ccw_device the interrupt is for.
@@ -1257,7 +1257,7 @@ static const struct device_type ctcm_devtype = {
.groups = ctcm_attr_groups,
};
-/**
+/*
* Add ctcm specific attributes.
* Add ctcm private data.
*
@@ -1293,7 +1293,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
return 0;
}
-/**
+/*
* Add a new channel to the list of channels.
* Keeps the channel list sorted.
*
@@ -1343,7 +1343,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
ch->type = type;
- /**
+ /*
* "static" ccws are used in the following way:
*
* ccw[0..2] (Channel program for generic I/O):
@@ -1471,7 +1471,7 @@ static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
return type;
}
-/**
+/*
*
* Setup an interface.
*
@@ -1595,7 +1595,7 @@ out_err_result:
return result;
}
-/**
+/*
* Shutdown an interface.
*
* cgdev Device to be shut down.
@@ -1738,7 +1738,7 @@ static void print_banner(void)
pr_info("CTCM driver initialized\n");
}
-/**
+/*
* Initialize module.
* This is called just after the module is loaded.
*
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index f0436f555c62..88abfb5e8045 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1016,7 +1016,7 @@ done:
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
-/**
+/*
* Unpack a just received skb and hand it over to
* upper layers.
* special MPC version of unpack_skb.
@@ -1211,7 +1211,7 @@ done:
__func__, dev->name, ch, ch->id);
}
-/**
+/*
* tasklet helper for mpc's skb unpacking.
*
* ch The channel to work on.
@@ -1320,7 +1320,7 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
* CTCM_PROTO_MPC only
*/
-/**
+/*
* NOP action for statemachines
*/
static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
@@ -1426,7 +1426,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Handle mpc group action timeout.
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index eb07862bd36a..98c4864932d2 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* A generic FSM based on fsm used in isdn4linux
*
*/
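
Every /** -> /* hunk in these s390/net files has the same motivation: the /** opener marks a comment as kernel-doc, and scripts/kernel-doc warns when such a comment does not follow the expected structure. These comments are plain prose, so they are demoted to ordinary block comments. For contrast, a well-formed kernel-doc comment for a hypothetical function looks like:

/**
 * example_fn() - one-line summary in kernel-doc form
 * @arg: what the parameter means
 *
 * Free-form description may follow the parameter block.
 *
 * Return: 0 on success, negative errno on failure.
 */
int example_fn(int arg);
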
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 26cc943d2034..5f7e28de8b15 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -555,7 +555,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_disable;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto err_resource;
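
pci_set_dma_mask() is a thin legacy wrapper around dma_set_mask(), so ism_drv.c switches to the generic DMA API on &pdev->dev directly. A driver that also needs the coherent mask can collapse both calls into one; a sketch of the equivalent modern idiom:

	/* one call sets both the streaming and the coherent DMA mask */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;
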
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 440219bcaa2b..c18fd48e02b6 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -40,18 +40,18 @@
#error Cannot compile lcs.c without some net devices switched on.
#endif
-/**
+/*
* initialization string for output
*/
static char version[] __initdata = "LCS driver";
-/**
+/*
* the root device for lcs group devices
*/
static struct device *lcs_root_dev;
-/**
+/*
* Some prototypes.
*/
static void lcs_tasklet(unsigned long);
@@ -62,14 +62,14 @@ static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
#endif /* CONFIG_IP_MULTICAST */
static int lcs_recovery(void *ptr);
-/**
+/*
* Debug Facility Stuff
*/
static char debug_buffer[255];
static debug_info_t *lcs_dbf_setup;
static debug_info_t *lcs_dbf_trace;
-/**
+/*
* LCS Debug Facility functions
*/
static void
@@ -96,7 +96,7 @@ lcs_register_debug_facility(void)
return 0;
}
-/**
+/*
* Allocate io buffers.
*/
static int
@@ -123,7 +123,7 @@ lcs_alloc_channel(struct lcs_channel *channel)
return 0;
}
-/**
+/*
* Free io buffers.
*/
static void
@@ -151,7 +151,7 @@ lcs_cleanup_channel(struct lcs_channel *channel)
lcs_free_channel(channel);
}
-/**
+/*
* LCS free memory for card and channels.
*/
static void
@@ -162,7 +162,7 @@ lcs_free_card(struct lcs_card *card)
kfree(card);
}
-/**
+/*
* LCS alloc memory for card and channels
*/
static struct lcs_card *
@@ -402,7 +402,7 @@ lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
return rc;
}
-/**
+/*
* Initialize channels, card and state machines.
*/
static void
@@ -451,7 +451,8 @@ static void lcs_clear_multicast_list(struct lcs_card *card)
spin_unlock_irqrestore(&card->ipm_lock, flags);
#endif
}
-/**
+
+/*
* Clean up channels, card and state machines.
*/
static void
@@ -468,7 +469,7 @@ lcs_cleanup_card(struct lcs_card *card)
lcs_cleanup_channel(&card->read);
}
-/**
+/*
* Start channel.
*/
static int
@@ -517,7 +518,7 @@ lcs_clear_channel(struct lcs_channel *channel)
}
-/**
+/*
* Stop channel.
*/
static int
@@ -545,7 +546,7 @@ lcs_stop_channel(struct lcs_channel *channel)
return 0;
}
-/**
+/*
* start read and write channel
*/
static int
@@ -565,7 +566,7 @@ lcs_start_channels(struct lcs_card *card)
return rc;
}
-/**
+/*
* stop read and write channel
*/
static int
@@ -577,7 +578,7 @@ lcs_stop_channels(struct lcs_card *card)
return 0;
}
-/**
+/*
* Get empty buffer.
*/
static struct lcs_buffer *
@@ -610,7 +611,7 @@ lcs_get_buffer(struct lcs_channel *channel)
return buffer;
}
-/**
+/*
* Resume channel program if the channel is suspended.
*/
static int
@@ -636,7 +637,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
}
-/**
+/*
* Make a buffer ready for processing.
*/
static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
@@ -678,7 +679,7 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
return rc;
}
-/**
+/*
* Mark the buffer as processed. Take care of the suspend bit
* of the previous buffer. This function is called from
* interrupt context, so the lock must not be taken.
@@ -712,7 +713,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
return __lcs_resume_channel(channel);
}
-/**
+/*
* Put a processed buffer back to state empty.
*/
static void
@@ -728,7 +729,7 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
}
-/**
+/*
* Get buffer for a lan command.
*/
static struct lcs_buffer *
@@ -785,7 +786,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
return reply;
}
-/**
+/*
* Notifier function for lancmd replies. Called from read irq.
*/
static void
@@ -813,7 +814,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
spin_unlock(&card->lock);
}
-/**
+/*
* Emit buffer of a lan command.
*/
static void
@@ -877,7 +878,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
return rc ? -EIO : 0;
}
-/**
+/*
* LCS startup command
*/
static int
@@ -895,7 +896,7 @@ lcs_send_startup(struct lcs_card *card, __u8 initiator)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* LCS shutdown command
*/
static int
@@ -912,7 +913,7 @@ lcs_send_shutdown(struct lcs_card *card)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* LCS lanstat command
*/
static void
@@ -939,7 +940,7 @@ lcs_send_lanstat(struct lcs_card *card)
return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
}
-/**
+/*
* send stoplan command
*/
static int
@@ -958,7 +959,7 @@ lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* send startlan command
*/
static void
@@ -986,7 +987,7 @@ lcs_send_startlan(struct lcs_card *card, __u8 initiator)
}
#ifdef CONFIG_IP_MULTICAST
-/**
+/*
* send setipm command (Multicast)
*/
static int
@@ -1010,7 +1011,7 @@ lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* send delipm command (Multicast)
*/
static int
@@ -1034,7 +1035,7 @@ lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* check if multicast is supported by LCS
*/
static void
@@ -1074,7 +1075,7 @@ lcs_check_multicast_support(struct lcs_card *card)
return -EOPNOTSUPP;
}
-/**
+/*
* set or del multicast address on LCS card
*/
static void
@@ -1129,7 +1130,7 @@ list_modified:
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
-/**
+/*
* get mac address for the relevant Multicast address
*/
static void
@@ -1139,7 +1140,7 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
ip_eth_mc_map(ipm, mac);
}
-/**
+/*
* function called by net device to handle multicast address relevant things
*/
static void lcs_remove_mc_addresses(struct lcs_card *card,
@@ -1260,7 +1261,7 @@ out:
}
#endif /* CONFIG_IP_MULTICAST */
-/**
+/*
* function called by net device to
* handle multicast address relevant things
*/
@@ -1355,7 +1356,7 @@ lcs_schedule_recovery(struct lcs_card *card)
schedule_work(&card->kernel_thread_starter);
}
-/**
+/*
* IRQ Handler for LCS channels
*/
static void
@@ -1439,7 +1440,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
tasklet_schedule(&channel->irq_tasklet);
}
-/**
+/*
* Tasklet for IRQ handler
*/
static void
@@ -1476,7 +1477,7 @@ lcs_tasklet(unsigned long data)
wake_up(&channel->wait_q);
}
-/**
+/*
* Finish current tx buffer and make it ready for transmit.
*/
static void
@@ -1490,7 +1491,7 @@ __lcs_emit_txbuffer(struct lcs_card *card)
card->tx_emitted++;
}
-/**
+/*
* Callback for finished tx buffers.
*/
static void
@@ -1515,7 +1516,7 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
spin_unlock(&card->lock);
}
-/**
+/*
* Packet transmit function called by network stack
*/
static int
@@ -1593,7 +1594,7 @@ lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
return rc;
}
-/**
+/*
* send startlan and lanstat commands to make the LCS device ready
*/
static int
@@ -1648,7 +1649,7 @@ lcs_startlan(struct lcs_card *card)
return rc;
}
-/**
+/*
* LCS detect function
* setup channels and make them I/O ready
*/
@@ -1680,7 +1681,7 @@ lcs_detect(struct lcs_card *card)
return rc;
}
-/**
+/*
* LCS Stop card
*/
static int
@@ -1705,7 +1706,7 @@ lcs_stopcard(struct lcs_card *card)
return rc;
}
-/**
+/*
* Kernel Thread helper functions for LGW initiated commands
*/
static void
@@ -1721,7 +1722,7 @@ lcs_start_kernel_thread(struct work_struct *work)
#endif
}
-/**
+/*
* Process control frames.
*/
static void
@@ -1748,7 +1749,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
lcs_notify_lancmd_waiters(card, cmd);
}
-/**
+/*
* Unpack network packet.
*/
static void
@@ -1779,7 +1780,7 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
netif_rx(skb);
}
-/**
+/*
* LCS main routine to get packets and lancmd replies from the buffers
*/
static void
@@ -1829,7 +1830,7 @@ lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
lcs_ready_buffer(&card->read, buffer);
}
-/**
+/*
* get network statistics for ifconfig and other user programs
*/
static struct net_device_stats *
@@ -1842,7 +1843,7 @@ lcs_getstats(struct net_device *dev)
return &card->stats;
}
-/**
+/*
* stop lcs device
* This function will be called by user doing ifconfig xxx down
*/
@@ -1866,7 +1867,7 @@ lcs_stop_device(struct net_device *dev)
return rc;
}
-/**
+/*
* start lcs device and make it runnable
* This function will be called by user doing ifconfig xxx up
*/
@@ -1892,7 +1893,7 @@ lcs_open_device(struct net_device *dev)
return rc;
}
-/**
+/*
* show function for portno called by cat or similar things
*/
static ssize_t
@@ -1908,7 +1909,7 @@ lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%d\n", card->portno);
}
-/**
+/*
* store the value which is piped to file portno
*/
static ssize_t
@@ -2033,7 +2034,7 @@ static const struct device_type lcs_devtype = {
.groups = lcs_attr_groups,
};
-/**
+/*
* lcs_probe_device is called on establishing a new ccwgroup_device.
*/
static int
@@ -2077,7 +2078,7 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev)
return register_netdev(card->dev);
}
-/**
+/*
* lcs_new_device will be called by setting the group device online.
*/
static const struct net_device_ops lcs_netdev_ops = {
@@ -2199,7 +2200,7 @@ out_err:
return -ENODEV;
}
-/**
+/*
* lcs_shutdown_device, called when setting the group device offline.
*/
static int
@@ -2240,7 +2241,7 @@ lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
return __lcs_shutdown_device(ccwgdev, 0);
}
-/**
+/*
* drive lcs recovery after startup and startlan initiated by Lan Gateway
*/
static int
@@ -2271,7 +2272,7 @@ lcs_recovery(void *ptr)
return 0;
}
-/**
+/*
* lcs_remove_device, free buffers and card
*/
static void
@@ -2315,7 +2316,7 @@ static struct ccw_driver lcs_ccw_driver = {
.int_class = IRQIO_LCS,
};
-/**
+/*
* LCS ccwgroup driver registration
*/
static struct ccwgroup_driver lcs_group_driver = {
@@ -2351,7 +2352,7 @@ static const struct attribute_group *lcs_drv_attr_groups[] = {
NULL,
};
-/**
+/*
* LCS Module/Kernel initialization function
*/
static int
@@ -2389,7 +2390,7 @@ out_err:
}
-/**
+/*
* LCS module cleanup function
*/
static void
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5a0c2f07a3a2..981e7b1c6b96 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -58,7 +58,7 @@ MODULE_AUTHOR
("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
-/**
+/*
* Debug Facility stuff
*/
#define IUCV_DBF_SETUP_NAME "iucv_setup"
@@ -107,7 +107,7 @@ DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
debug_sprintf_event(iucv_dbf_trace, level, text ); \
} while (0)
-/**
+/*
* some more debug stuff
*/
#define PRINTK_HEADER " iucv: " /* for debugging */
@@ -118,7 +118,7 @@ static struct device_driver netiucv_driver = {
.bus = &iucv_bus,
};
-/**
+/*
* Per connection profiling data
*/
struct connection_profile {
@@ -133,7 +133,7 @@ struct connection_profile {
unsigned long tx_max_pending;
};
-/**
+/*
* Representation of one iucv connection
*/
struct iucv_connection {
@@ -154,13 +154,13 @@ struct iucv_connection {
char userdata[17];
};
-/**
+/*
* Linked list of all connection structs.
*/
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);
-/**
+/*
* Representation of event-data for the
* connection state machine.
*/
@@ -169,7 +169,7 @@ struct iucv_event {
void *data;
};
-/**
+/*
* Private part of the network device structure
*/
struct netiucv_priv {
@@ -180,7 +180,7 @@ struct netiucv_priv {
struct device *dev;
};
-/**
+/*
* Link level header for a packet.
*/
struct ll_header {
@@ -195,7 +195,7 @@ struct ll_header {
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC 5000
-/**
+/*
* Compatibility macros for busy handling
* of network devices.
*/
@@ -223,7 +223,7 @@ static u8 iucvMagic_ebcdic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
-/**
+/*
* Convert an iucv userId to its printable
* form (strip whitespace at end).
*
@@ -262,7 +262,7 @@ static char *netiucv_printuser(struct iucv_connection *conn)
return netiucv_printname(conn->userid, 8);
}
-/**
+/*
* States of the interface statemachine.
*/
enum dev_states {
@@ -270,7 +270,7 @@ enum dev_states {
DEV_STATE_STARTWAIT,
DEV_STATE_STOPWAIT,
DEV_STATE_RUNNING,
- /**
+ /*
* MUST always be the last element!!
*/
NR_DEV_STATES
@@ -283,7 +283,7 @@ static const char *dev_state_names[] = {
"Running",
};
-/**
+/*
* Events of the interface statemachine.
*/
enum dev_events {
@@ -291,7 +291,7 @@ enum dev_events {
DEV_EVENT_STOP,
DEV_EVENT_CONUP,
DEV_EVENT_CONDOWN,
- /**
+ /*
* MUST always be the last element!!
*/
NR_DEV_EVENTS
@@ -304,11 +304,11 @@ static const char *dev_event_names[] = {
"Connection down",
};
-/**
+/*
* Events of the connection statemachine
*/
enum conn_events {
- /**
+ /*
* Events, representing callbacks from
* the lowlevel iucv layer
*/
@@ -320,23 +320,23 @@ enum conn_events {
CONN_EVENT_RX,
CONN_EVENT_TXDONE,
- /**
+ /*
* Events, representing error return codes from
* calls to lowlevel iucv layer
*/
- /**
+ /*
* Event, representing timer expiry.
*/
CONN_EVENT_TIMER,
- /**
+ /*
* Events, representing commands from upper levels.
*/
CONN_EVENT_START,
CONN_EVENT_STOP,
- /**
+ /*
* MUST always be the last element!!
*/
NR_CONN_EVENTS,
@@ -357,55 +357,55 @@ static const char *conn_event_names[] = {
"Stop",
};
-/**
+/*
* States of the connection statemachine.
*/
enum conn_states {
- /**
+ /*
* Connection not assigned to any device,
* initial state, invalid
*/
CONN_STATE_INVALID,
- /**
+ /*
* Userid assigned but not operating
*/
CONN_STATE_STOPPED,
- /**
+ /*
* Connection registered,
* no connection request sent yet,
* no connection request received
*/
CONN_STATE_STARTWAIT,
- /**
+ /*
* Connection registered and connection request sent,
* no acknowledge and no connection request received yet.
*/
CONN_STATE_SETUPWAIT,
- /**
+ /*
* Connection up and running idle
*/
CONN_STATE_IDLE,
- /**
+ /*
* Data sent, awaiting CONN_EVENT_TXDONE
*/
CONN_STATE_TX,
- /**
+ /*
* Error during registration.
*/
CONN_STATE_REGERR,
- /**
+ /*
* Error during registration.
*/
CONN_STATE_CONNERR,
- /**
+ /*
* MUST always be the last element!!
*/
NR_CONN_STATES,
@@ -424,7 +424,7 @@ static const char *conn_state_names[] = {
};
-/**
+/*
* Debug Facility Stuff
*/
static debug_info_t *iucv_dbf_setup = NULL;
@@ -556,7 +556,7 @@ static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
-/**
+/*
* NOP action for statemachines
*/
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
@@ -567,7 +567,7 @@ static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
* Actions of the connection statemachine
*/
-/**
+/*
* netiucv_unpack_skb
* @conn: The connection where this skb has been received.
* @pskb: The received skb.
@@ -993,7 +993,7 @@ static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
* Actions for interface - statemachine.
*/
-/**
+/*
* dev_action_start
* @fi: An instance of an interface statemachine.
* @event: The event, just happened.
@@ -1012,7 +1012,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
-/**
+/*
* Shutdown connection by sending CONN_EVENT_STOP to it.
*
* @param fi An instance of an interface statemachine.
@@ -1034,7 +1034,7 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
-/**
+/*
* Called from connection statemachine
* when a connection is up and running.
*
@@ -1067,7 +1067,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Called from connection statemachine
* when a connection has been shutdown.
*
@@ -1107,7 +1107,7 @@ static const fsm_node dev_fsm[] = {
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
-/**
+/*
* Transmit a packet.
* This is a helper function for netiucv_tx().
*
@@ -1144,7 +1144,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
spin_unlock_irqrestore(&conn->collect_lock, saveflags);
} else {
struct sk_buff *nskb = skb;
- /**
+ /*
* Copy the skb to a newly allocated skb in lowmem only if the
* data is located above 2G in memory or tailroom is < 2.
*/
@@ -1164,7 +1164,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
}
copied = 1;
}
- /**
+ /*
* The skb is now below 2G and has enough room. Add headers.
*/
header.next = nskb->len + NETIUCV_HDRLEN;
@@ -1194,7 +1194,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
if (copied)
dev_kfree_skb(nskb);
else {
- /**
+ /*
* Remove our headers. They get added
* again on retransmit.
*/
@@ -1217,7 +1217,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
* Interface API for upper network layers
*/
-/**
+/*
* Open an interface.
* Called from generic network layer when ifconfig up is run.
*
@@ -1233,7 +1233,7 @@ static int netiucv_open(struct net_device *dev)
return 0;
}
-/**
+/*
* Close an interface.
* Called from generic network layer when ifconfig down is run.
*
@@ -1249,7 +1249,7 @@ static int netiucv_close(struct net_device *dev)
return 0;
}
-/**
+/*
* Start transmission of a packet.
* Called from generic network device layer.
*
@@ -1266,7 +1266,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
int rc;
IUCV_DBF_TEXT(trace, 4, __func__);
- /**
+ /*
* Some sanity checks ...
*/
if (skb == NULL) {
@@ -1282,7 +1282,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /**
+ /*
* If connection is not running, try to restart it
* and throw away packet.
*/
@@ -1304,7 +1304,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
-/**
+/*
* netiucv_stats
* @dev: Pointer to interface struct.
*
@@ -1745,7 +1745,7 @@ static void netiucv_unregister_device(struct device *dev)
device_unregister(dev);
}
-/**
+/*
* Allocate and initialize a new connection structure.
* Add it to the list of netiucv connections;
*/
@@ -1802,7 +1802,7 @@ out:
return NULL;
}
-/**
+/*
* Release a connection structure and remove it from the
* list of netiucv connections.
*/
@@ -1826,7 +1826,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
kfree_skb(conn->tx_buff);
}
-/**
+/*
* Release everything of a net device.
*/
static void netiucv_free_netdevice(struct net_device *dev)
@@ -1848,7 +1848,7 @@ static void netiucv_free_netdevice(struct net_device *dev)
}
}
-/**
+/*
* Initialize a net device. (Called from kernel in alloc_netdev())
*/
static const struct net_device_ops netiucv_netdev_ops = {
@@ -1873,7 +1873,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
dev->netdev_ops = &netiucv_netdev_ops;
}
-/**
+/*
* Allocate and initialize everything of a net device.
*/
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 747af96dd15c..e8bc8d9e4583 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -22,9 +22,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
@@ -115,9 +115,9 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index 1ee31a5f063b..7125e484bf93 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -10,7 +10,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
- struct e4_fcoe_task_context *context;
+ struct fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index ba94413fe2ea..631a15969d21 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -141,7 +141,7 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct completion cleanup_done;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -503,7 +503,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 625e58ccb8c8..1ff5bc314fc0 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -16,7 +16,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 3404782988d5..b649f835d436 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -584,7 +584,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
+ struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -602,7 +602,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -674,7 +674,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -692,7 +692,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -850,7 +850,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct e4_fcoe_task_context *task_ctx;
+ struct fcoe_task_context *task_ctx;
u16 xid;
struct fcoe_wqe *sqe;
u16 sqe_idx;
@@ -2293,7 +2293,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 42d0d941dba5..0da32fd3302e 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2170,7 +2170,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -2197,7 +2197,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2688,12 +2688,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&qedf->dbg_ctx,
@@ -3416,7 +3416,9 @@ retry_probe:
qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
if (IS_ERR(qedf->devlink)) {
QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
+ rc = PTR_ERR(qedf->devlink);
qedf->devlink = NULL;
+ goto err2;
}
}
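
Besides the type renames, the qedf_main.c hunk fixes real error handling: a failed devlink registration used to be logged and then ignored, letting probe continue. The corrected pattern captures the errno with PTR_ERR() before clearing the pointer and then unwinds; sketched generically with a hypothetical create_thing() helper:

	thing = create_thing(parent);	/* returns pointer or ERR_PTR() */
	if (IS_ERR(thing)) {
		rc = PTR_ERR(thing);	/* capture the errno first */
		thing = NULL;		/* so teardown never derefs an ERR_PTR */
		goto err_unwind;
	}
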
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 42f5afb60055..8deb2001dc2f 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -136,7 +136,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
- struct status_block_e4 *sb = NULL;
+ struct status_block *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
- STATUS_BLOCK_E4_PROD_INDEX_MASK);
+ STATUS_BLOCK_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d01cd829ef97..84a4204a2cb4 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -85,7 +85,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
@@ -261,7 +261,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
@@ -970,7 +970,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
@@ -990,9 +990,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1073,7 +1073,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
@@ -1091,9 +1091,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1434,7 +1434,7 @@ static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd;
struct qedi_cmd *cmd;
@@ -1454,9 +1454,9 @@ static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1548,7 +1548,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
@@ -1570,9 +1570,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1649,7 +1649,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
@@ -1669,9 +1669,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1991,7 +1991,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2014,9 +2014,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
cmd->task_id = tid;
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 52772904ef5d..642556a1ce1c 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -202,7 +202,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
u32 val;
u16 index;
u8 val_byte;
@@ -224,7 +224,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
- E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -254,7 +254,7 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
- struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
u32 remaining_recv_len, u32 expected_data_transfer_len,
u8 num_sges, bool tx_dif_conn_err_en)
{
@@ -266,12 +266,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
- E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
-void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
@@ -470,7 +470,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
}
}
-static void set_local_completion_context(struct e4_iscsi_task_context *context)
+static void set_local_completion_context(struct iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -487,7 +487,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
@@ -615,7 +615,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -657,7 +657,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -703,7 +703,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -758,7 +758,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index 10f19f0af0a3..df2d471a7b51 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -10,7 +10,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index a31c5de74754..a282860da0aa 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct qedi_io_bdt io_tbl;
- struct e4_iscsi_task_context request;
+ struct iscsi_task_context request;
unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma;
u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e6dc0b495a82..1dec814d8788 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -351,12 +351,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys,
+ sizeof(struct status_block), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -865,7 +865,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
- qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+ qedi->pf_params.iscsi_pf_params.two_msl_timer = QED_TWO_MSL_TIMER_DFLT;
+ qedi->pf_params.iscsi_pf_params.tx_sws_timer = QED_TX_SWS_TIMER_DFLT;
qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
@@ -1259,7 +1260,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -1315,7 +1316,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 4df32bc4c7a6..07d52cafbb31 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index e13fd3ac1939..2fbcb78cdaaf 100644
--- a/drivers/soc/fsl/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -46,6 +46,9 @@ struct dpio_rsp_get_attr {
__le64 qbman_portal_ci_addr;
/* cmd word 3 */
__le32 qbman_version;
+ __le32 pad1;
+ /* cmd word 4 */
+ __le32 clk;
};
struct dpio_stashing_dest {
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index 7f397b4ad878..dd948889eeab 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -162,6 +162,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
goto err_get_attr;
}
desc.qman_version = dpio_attrs.qbman_version;
+ desc.qman_clk = dpio_attrs.clk;
err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
if (err) {
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 7351f3030550..3fd0d0840287 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/dim.h>
#include <linux/slab.h>
#include "dpio.h"
@@ -28,6 +29,14 @@ struct dpaa2_io {
spinlock_t lock_notifications;
struct list_head notifications;
struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
};
struct dpaa2_io_store {
@@ -100,6 +109,17 @@ struct dpaa2_io *dpaa2_io_service_select(int cpu)
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+static void dpaa2_io_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+}
+
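
For reference, the worker above is driven by the generic DIM machinery from <linux/dim.h> rather than called directly: the driver feeds samples to net_dim(), which queues dim->work once it decides to switch moderation profiles, and dpaa2_io_dim_work() then applies the chosen profile and re-arms the algorithm. A minimal sketch of that feed path (events, packets, bytes and d are placeholder locals, not names from the patch):

    struct dim_sample sample = {};

    dim_update_sample(events, packets, bytes, &sample);
    net_dim(&d->rx_dim, sample);    /* may schedule_work(&d->rx_dim.work) */

The same flow is wired up for real in dpaa2_io_update_net_dim() further down.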
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
@@ -114,6 +134,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
struct device *dev)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
if (!obj)
return NULL;
@@ -127,7 +148,15 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+	/* Compute how many ns a chunk of 256 QBMAN clock cycles takes, since
+	 * the interrupt timeout period register must be specified in QBMAN
+	 * clock cycles, in increments of 256.
+ */
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) {
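
As a worked example of the conversion above, assume a hypothetical 500 MHz QBMAN clock (the real frequency is reported by the MC through dpio_get_attributes()):

    u32 qman_clk = 500000000;                        /* 500 MHz, assumed */
    u32 cycles_per_us = qman_clk / 1000000;          /* = 500 */
    u32 ns_per_256_cycles = 256000 / cycles_per_us;  /* = 512 ns */

i.e. the value stored in swp_desc.qman_256_cycles_per_ns is the duration, in ns, of one 256-cycle chunk.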
@@ -138,6 +167,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
INIT_LIST_HEAD(&obj->node);
spin_lock_init(&obj->lock_mgmt_cmd);
spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */
@@ -155,6 +185,12 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
return obj;
}
@@ -194,6 +230,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
struct qbman_swp *swp;
u32 status;
+ obj->event_ctr++;
+
swp = obj->swp;
status = qbman_swp_interrupt_read_status(swp);
if (!status)
@@ -779,3 +817,82 @@ int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+/**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
+
+/**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: returned interrupt holdoff (timeout) period in us
+ */
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+/**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+{
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+/**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+{
+ return d->swp->use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
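
A minimal sketch of how a consumer (e.g. a DPAA2 Ethernet driver) might expose these helpers through ethtool; struct my_priv, priv->dpio and the ops wiring are assumptions here, only the dpaa2_io_* calls come from this patch:

    static int my_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
    {
    	struct my_priv *priv = netdev_priv(dev);

    	dpaa2_io_get_irq_coalescing(priv->dpio, &c->rx_coalesce_usecs);
    	c->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(priv->dpio);
    	return 0;
    }

    static int my_set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
    {
    	struct my_priv *priv = netdev_priv(dev);

    	dpaa2_io_set_adaptive_coalescing(priv->dpio, c->use_adaptive_rx_coalesce);
    	return dpaa2_io_set_irq_coalescing(priv->dpio, c->rx_coalesce_usecs);
    }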
+/**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+{
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+}
+EXPORT_SYMBOL(dpaa2_io_update_net_dim);
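
And a sketch of the datapath side: a NAPI poll loop counting what it dequeued and feeding the totals to the DIM machinery once per poll. Everything except dpaa2_io_update_net_dim() is a placeholder name:

    static int my_poll(struct napi_struct *napi, int budget)
    {
    	struct my_channel *ch = container_of(napi, struct my_channel, napi);
    	u64 frames = 0, bytes = 0;
    	int cleaned;

    	/* my_consume_frames() dequeues up to budget frames, reports totals */
    	cleaned = my_consume_frames(ch, budget, &frames, &bytes);

    	if (cleaned < budget && napi_complete_done(napi, cleaned))
    		my_rearm_irq(ch);

    	dpaa2_io_update_net_dim(ch->dpio, frames, bytes);
    	return cleaned;
    }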
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index af74c597a675..8ed606ffaac5 100644
--- a/drivers/soc/fsl/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -162,6 +162,7 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
attr->qbman_portal_ci_offset =
le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+ attr->clk = le32_to_cpu(dpio_rsp->clk);
return 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index da06f7258098..7fda44f0d7f4 100644
--- a/drivers/soc/fsl/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -59,6 +59,7 @@ int dpio_disable(struct fsl_mc_io *mc_io,
* @num_priorities: Number of priorities for the notification channel (1-8);
* relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
* @qbman_version: QBMAN version
+ * @clk: QBMAN clock frequency value in Hz
*/
struct dpio_attr {
int id;
@@ -68,6 +69,7 @@ struct dpio_attr {
enum dpio_channel_mode channel_mode;
u8 num_priorities;
u32 qbman_version;
+ u32 clk;
};
int dpio_get_attributes(struct fsl_mc_io *mc_io,
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index f13da4d7d1c5..3474bf5f88d5 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -29,6 +29,7 @@
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
@@ -38,6 +39,7 @@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
@@ -355,6 +357,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
+	/* Initialize the software portal with an IRQ timeout period of 0 us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
return p;
}
@@ -1796,3 +1801,56 @@ u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
return le32_to_cpu(a->fill);
}
+
+/**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+{
+ u32 itp, max_holdoff;
+
+	/* Convert the irq_holdoff value from usecs to increments of 256 QBMAN
+	 * clock cycles, which depends on the QBMAN internal frequency.
+ */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+}
+
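
Continuing the hypothetical 500 MHz example (512 ns per 256-cycle chunk), the conversion above works out as:

    u32 itp = (10 * 1000) / 512;    /* a 10 us holdoff -> ITPR value 19 */
    u32 max = (512 * 4096) / 1000;  /* 4096 is the register limit -> max 2097 us */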
+/**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: returned interrupt holdoff (timeout) period in us
+ */
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+{
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index c7c2225b7d91..b23883dd2725 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -24,6 +24,8 @@ struct qbman_swp_desc {
void *cena_bar; /* Cache-enabled portal base address */
void __iomem *cinh_bar; /* Cache-inhibited portal base address */
u32 qman_version;
+ u32 qman_clk;
+ u32 qman_256_cycles_per_ns;
};
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
@@ -156,6 +158,11 @@ struct qbman_swp {
} eqcr;
spinlock_t access_spinlock;
+
+ /* Interrupt coalescing */
+ u32 irq_threshold;
+ u32 irq_holdoff;
+ int use_adaptive_rx_coalesce;
};
/* Function pointers */
@@ -648,4 +655,10 @@ static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
return qbman_swp_dqrr_next_ptr(s);
}
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff);
+
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff);
+
#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 8fcdf89da8aa..1dc849378a0f 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -4614,14 +4614,9 @@ static int qlge_probe(struct pci_dev *pdev,
goto netdev_free;
}
- err = devlink_register(devlink);
- if (err)
- goto netdev_free;
-
err = qlge_health_create_reporters(qdev);
-
if (err)
- goto devlink_unregister;
+ goto netdev_free;
/* Start up the timer to trigger EEH if
* the bus goes dead
@@ -4632,10 +4627,9 @@ static int qlge_probe(struct pci_dev *pdev,
qlge_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
cards_found++;
+ devlink_register(devlink);
return 0;
-devlink_unregister:
- devlink_unregister(devlink);
netdev_free:
free_netdev(ndev);
devlink_free:
@@ -4660,13 +4654,13 @@ static void qlge_remove(struct pci_dev *pdev)
struct net_device *ndev = qdev->ndev;
struct devlink *devlink = priv_to_devlink(qdev);
+ devlink_unregister(devlink);
del_timer_sync(&qdev->timer);
qlge_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
qlge_release_all(pdev);
pci_disable_device(pdev);
devlink_health_reporter_destroy(qdev->reporter);
- devlink_unregister(devlink);
devlink_free(devlink);
free_netdev(ndev);
}
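
The qlge reordering follows the devlink convention adopted in this series: devlink_register() no longer returns an error and should be the very last step of probe, with devlink_unregister() the first step of remove. The resulting shape, as a hedged sketch with placeholder my_* names:

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
    	struct devlink *devlink;
    	int err;

    	devlink = devlink_alloc(&my_devlink_ops, sizeof(struct my_priv),
    				&pdev->dev);
    	if (!devlink)
    		return -ENOMEM;

    	err = my_setup_everything(pdev, devlink);
    	if (err) {
    		devlink_free(devlink);
    		return err;
    	}

    	devlink_register(devlink);	/* last, cannot fail */
    	return 0;
    }

    static void my_remove(struct pci_dev *pdev)
    {
    	struct devlink *devlink = pci_get_drvdata(pdev);

    	devlink_unregister(devlink);	/* first, before teardown */
    	my_teardown_everything(pdev);
    	devlink_free(devlink);
    }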
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 0b468f5d55bc..068ed8417e5a 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -267,6 +267,8 @@ static const struct net_device_ops pn_netdev_ops = {
static void pn_net_setup(struct net_device *dev)
{
+ const u8 addr = PN_MEDIA_USB;
+
dev->features = 0;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -274,8 +276,9 @@ static void pn_net_setup(struct net_device *dev)
dev->min_mtu = PHONET_MIN_MTU;
dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
+
dev->tx_queue_len = 1;
dev->netdev_ops = &pn_netdev_ops;
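
The f_phonet change is part of the tree-wide move away from writing dev->dev_addr directly: the address byte is staged in a local and installed with dev_addr_set(), so the core can manage the address storage. The same helper in a generic, illustrative setup (the MAC value is an arbitrary locally administered address):

    #include <linux/etherdevice.h>

    static void my_assign_addr(struct net_device *dev)
    {
    	/* arbitrary locally administered address, for illustration only */
    	static const u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

    	dev_addr_set(dev, addr);
    }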